diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ATen.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ATen.h
new file mode 100644
index 0000000000000000000000000000000000000000..effdd469d19b91316aa21ae99d43055f49c950eb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ATen.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#if !defined(_MSC_VER) && __cplusplus < 201703L
+#error C++17 or later compatible compiler is required to use ATen.
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// TODO: try to remove this
+// There is some back story, see https://github.com/pytorch/pytorch/issues/48684
+#include
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h
new file mode 100644
index 0000000000000000000000000000000000000000..0275ef099b03d714b916b9d0d09c4827724bf58c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h
@@ -0,0 +1,153 @@
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// Defines the accumulation type for a scalar type.
+// Example:
+//   using accscalar_t = acc_type;
+//
+// Accumulation types are an important concept in numeric computing
+// because you frequently want to perform intermediate computations
+// at a higher precision than the input and output precision, to avoid
+// compounding internal rounding errors. Accumulation is the most
+// well-known intermediate computation (it is of great importance for
+// sum reduction and matrix multiply, for example), but in PyTorch
+// acc_type ends up getting used for all sorts of other intermediate
+// computations, so it perhaps would be more accurately (ahem) called an
+// "accurate" type. acc_type is especially important for reduced
+// precision operations like float16 and bfloat16, where relatively
+// benign looking inputs can easily end up overflowing/underflowing.
+//
+// acc_type is parametrized by whether or not you are running on CUDA,
+// because on CUDA double precision operations are expensive
+// and so by default, we don't actually want to use double as an
+// acc_type on CUDA. A lot of things are typed out below, but
+// basically, the table is generated by a few rules:
+//
+//   If bool:
+//     Use 'bool' as acc_type.
+//   If floating point:
+//     If CUDA, use 'float' as acc_type (unless scalar_t is double),
+//     otherwise (CPU) use 'double'
+//   If integral:
+//     Use 'int64_t' as acc_type
+//
+// You're not forced to use this template; if you happen to know
+// something specific about your use case, you can specify your own
+// desired behavior. This template, however, will give you a reasonable
+// default that will work for all dtypes supported in PyTorch.
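+//
+// A minimal sketch of how acc_type is typically used, assuming the second
+// template parameter is the is_cuda flag declared below; the function name
+// and loop are illustrative, not part of this header:
+//
+//   template <typename scalar_t>
+//   scalar_t sum_all_cpu(const scalar_t* data, int64_t n) {
+//     // Per the CPU_ACC_TYPE table below: Half/BFloat16 accumulate in float,
+//     // float accumulates in double, integral types accumulate in int64_t.
+//     using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/false>;
+//     accscalar_t acc = accscalar_t(0);
+//     for (int64_t i = 0; i < n; ++i) {
+//       acc += static_cast<accscalar_t>(data[i]);
+//     }
+//     return static_cast<scalar_t>(acc); // round back to scalar_t once, at the end
+//   }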
+ +#if defined(__CUDACC__) +#include +#include +#elif defined(__HIPCC__) +#include +#include +#endif + +namespace at { + +template +struct AccumulateTypeDevice {}; + +template +struct AccumulateType {}; + +template +struct AccumulateType { + using type = typename AccumulateTypeDevice::type; +}; + +template +struct AccumulateType { + using type = typename AccumulateTypeDevice::type; +}; + +template +using acc_type_device = typename AccumulateTypeDevice::type; + +template +using acc_type = typename AccumulateType::type; + +#define ACC_TYPE(t, acc_t, device_type) \ + template <> \ + struct AccumulateTypeDevice { \ + using type = acc_t; \ + }; +#define MPS_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::MPS) +#define CUDA_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CUDA) +#define CPU_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CPU) + +MPS_ACC_TYPE(BFloat16, float); +MPS_ACC_TYPE(Half, float); +MPS_ACC_TYPE(Float8_e5m2, float); +MPS_ACC_TYPE(Float8_e4m3fn, float); +MPS_ACC_TYPE(Float8_e5m2fnuz, float); +MPS_ACC_TYPE(Float8_e4m3fnuz, float); +MPS_ACC_TYPE(float, float); +MPS_ACC_TYPE(double, float); +MPS_ACC_TYPE(int8_t, int64_t); +MPS_ACC_TYPE(uint8_t, int64_t); +MPS_ACC_TYPE(char, int64_t); +MPS_ACC_TYPE(int16_t, int64_t); +MPS_ACC_TYPE(int32_t, int64_t); +MPS_ACC_TYPE(int64_t, int64_t); +MPS_ACC_TYPE(bool, bool); +MPS_ACC_TYPE(c10::complex, c10::complex); +MPS_ACC_TYPE(c10::complex, c10::complex); +MPS_ACC_TYPE(c10::complex, c10::complex); + +#if defined(__CUDACC__) || defined(__HIPCC__) +CUDA_ACC_TYPE(half, float); +#endif +CUDA_ACC_TYPE(BFloat16, float); +CUDA_ACC_TYPE(Half, float); +CUDA_ACC_TYPE(Float8_e5m2, float); +CUDA_ACC_TYPE(Float8_e4m3fn, float); +CUDA_ACC_TYPE(Float8_e5m2fnuz, float); +CUDA_ACC_TYPE(Float8_e4m3fnuz, float); +CUDA_ACC_TYPE(float, float); +CUDA_ACC_TYPE(double, double); +CUDA_ACC_TYPE(int8_t, int64_t); +CUDA_ACC_TYPE(uint8_t, int64_t); +CUDA_ACC_TYPE(char, int64_t); +CUDA_ACC_TYPE(int16_t, int64_t); +CUDA_ACC_TYPE(int32_t, int64_t); +CUDA_ACC_TYPE(int64_t, int64_t); +CUDA_ACC_TYPE(bool, bool); +CUDA_ACC_TYPE(c10::complex, c10::complex); +CUDA_ACC_TYPE(c10::complex, c10::complex); +CUDA_ACC_TYPE(c10::complex, c10::complex); + +CPU_ACC_TYPE(BFloat16, float); +CPU_ACC_TYPE(Half, float); +CPU_ACC_TYPE(Float8_e5m2, float); +CPU_ACC_TYPE(Float8_e4m3fn, float); +CPU_ACC_TYPE(Float8_e5m2fnuz, float); +CPU_ACC_TYPE(Float8_e4m3fnuz, float); +CPU_ACC_TYPE(float, double); +CPU_ACC_TYPE(double, double); +CPU_ACC_TYPE(int8_t, int64_t); +CPU_ACC_TYPE(uint8_t, int64_t); +CPU_ACC_TYPE(char, int64_t); +CPU_ACC_TYPE(int16_t, int64_t); +CPU_ACC_TYPE(int32_t, int64_t); +CPU_ACC_TYPE(int64_t, int64_t); +CPU_ACC_TYPE(bool, bool); +CPU_ACC_TYPE(c10::complex, c10::complex); +CPU_ACC_TYPE(c10::complex, c10::complex); +CPU_ACC_TYPE(c10::complex, c10::complex); + +TORCH_API c10::ScalarType toAccumulateType( + c10::ScalarType type, + c10::DeviceType device); +TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda); + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Backend.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Backend.h new file mode 100644 index 0000000000000000000000000000000000000000..9651469e190085d913ba9b5d1ca02085886fc4e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Backend.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf621f34cc63735d7f7557f48146bb76467b8afc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#include
+#include
+
+// This file creates a fake allocator that just throws exceptions if
+// it is actually used.
+
+// state passed to the allocator is the std::function called
+// when the blob is released by ATen
+
+namespace at {
+
+static void* cpu_fixed_malloc(void*, ptrdiff_t) {
+  AT_ERROR("attempting to resize a tensor view of an external blob");
+}
+
+static void* cpu_fixed_realloc(void*, void*, ptrdiff_t) {
+  AT_ERROR("attempting to resize a tensor view of an external blob");
+}
+
+static void cpu_fixed_free(void* state, void* allocation) {
+  auto on_release = static_cast<std::function<void(void*)>*>(state);
+  (*on_release)(allocation);
+  delete on_release;
+}
+
+static Allocator CPU_fixed_allocator = {
+    cpu_fixed_malloc,
+    cpu_fixed_realloc,
+    cpu_fixed_free};
+
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..17c4ddd92f1d469abb771ed0392eed0df0508b1a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h
@@ -0,0 +1,29 @@
+#include
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
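+//
+// A minimal consumer-side sketch of the scheme described above, assuming the
+// generated fastpath functions live in the at::cpu namespace; the helper
+// function below is illustrative, not part of this header:
+//
+//   #include <ATen/CPUFunctions.h> // brings in CPUFunctions_inl.h for you
+//
+//   at::Tensor fastpath_add(const at::Tensor& a, const at::Tensor& b) {
+//     // Calls the CPU kernel directly, bypassing the dispatcher.
+//     return at::cpu::add(a, b);
+//   }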
+#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..62c3a104f5390beffd12c9e72fe3356f7ab26c1b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h @@ -0,0 +1,576 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..bdaee888e89bd9e1dd22c54a72d5d1b96affeda1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding 
Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. +// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..b3b7c537ca6e8ad8fe7a30a5a7af956af7994d6e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. 
+// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..e68b8871d95bd8a3d013559d0fe79c450ad04c29 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h @@ -0,0 +1,323 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. 
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..fde0a471ac0135f1dcb55f78e10d0818c5cff2e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding 
Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. +// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..90ffa6b1eb4a9cc5d64851784113a739e385a77e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. 
+#endif + +#include +#include +#include +#include + + + diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Context.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Context.h new file mode 100644 index 0000000000000000000000000000000000000000..931cd86e77d984cc3b69aca0516b7c3489320825 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Context.h @@ -0,0 +1,560 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace at { + +class Tensor; + +enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM }; + +class TORCH_API Context { + public: + Context(); + + const Generator& defaultGenerator(Device device) { + c10::DeviceType device_type = device.type(); + initCUDAIfNeeded(device_type); + initHIPIfNeeded(device_type); + if (device_type == at::kCPU) { + return at::detail::getDefaultCPUGenerator(); + } else if (device_type == at::kCUDA) { + return at::detail::getCUDAHooks().getDefaultCUDAGenerator(device.index()); + } else if (device_type == at::kMPS) { + return at::detail::getMPSHooks().getDefaultMPSGenerator(); + } else if (device_type == at::kXPU) { + return at::detail::getXPUHooks().getDefaultXPUGenerator(device.index()); + } else if (device_type == at::kIPU) { + return at::detail::getIPUHooks().getDefaultIPUGenerator(device.index()); + } else if (device_type == at::kPrivateUse1) { + return at::GetPrivateUse1HooksInterface()->getDefaultGenerator( + device.index()); + } else { + AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled."); + } + } + const AcceleratorHooksInterface& getAcceleratorHooksInterface( + c10::optional opt_device_type = c10::nullopt) { + c10::DeviceType device_type = opt_device_type.has_value() + ? 
opt_device_type.value() + : at::getAccelerator(true).value(); + if (device_type == at::kCUDA) { + return at::detail::getCUDAHooks(); + } else if (device_type == at::kMPS) { + return at::detail::getMPSHooks(); + } else if (device_type == at::kPrivateUse1) { + return at::detail::getPrivateUse1Hooks(); + } else { + AT_ERROR( + c10::DeviceTypeName(device_type), " device type not an accelerator."); + } + } + Device getDeviceFromPtr(void* data, c10::DeviceType device_type) { + initCUDAIfNeeded(device_type); + initHIPIfNeeded(device_type); + initXPUIfNeeded(device_type); + if (device_type == at::kCPU) { + return c10::DeviceType::CPU; + } else if (device_type == at::kCUDA) { + return at::detail::getCUDAHooks().getDeviceFromPtr(data); + } else if (device_type == at::kXPU) { + return at::detail::getXPUHooks().getDeviceFromPtr(data); + } else if (device_type == at::kPrivateUse1) { + return at::GetPrivateUse1HooksInterface()->getDeviceFromPtr(data); + } else { + AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled."); + } + } + static bool isPinnedPtr(const void* data) { + return detail::getCUDAHooks().isPinnedPtr(data); + } + static bool hasOpenMP(); + static bool hasMKL(); + static bool hasLAPACK(); + static bool hasMKLDNN(); + static bool hasMAGMA() { + return detail::getCUDAHooks().hasMAGMA(); + } + static bool hasCUDA() { + return detail::getCUDAHooks().hasCUDA(); + } + static bool hasMTIA() { + return detail::getMTIAHooks().hasMTIA(); + } + static bool hasCUDART() { + return detail::getCUDAHooks().hasCUDART(); + } + static long versionCUDART() { + return detail::getCUDAHooks().versionCUDART(); + } + static bool hasCuDNN() { + return detail::getCUDAHooks().hasCuDNN(); + } + static long versionCuDNN() { + return detail::getCUDAHooks().versionCuDNN(); + } + static bool hasCuSOLVER() { + return detail::getCUDAHooks().hasCuSOLVER(); + } + static bool hasHIP() { + return detail::getHIPHooks().hasHIP(); + } + static bool hasMPS() { + return detail::getMPSHooks().hasMPS(); + } + static bool hasIPU() { + return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU); + } + static bool hasXLA() { + return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA); + } + static bool hasXPU() { + return detail::getXPUHooks().hasXPU(); + } + static bool hasLazy() { + return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy); + } + static bool hasORT() { + return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT); + } + // defined in header so that getNonVariableType has ability to inline + // call_once check. getNonVariableType is called fairly frequently + void lazyInitCUDA() { + c10::call_once(thc_init, [&] { detail::getCUDAHooks().initCUDA(); }); + } + void lazyInitHIP() { + c10::call_once(thh_init, [&] { detail::getHIPHooks().initHIP(); }); + } + void lazyInitXPU() { + c10::call_once(thx_init, [&] { detail::getXPUHooks().initXPU(); }); + } + void lazyInitPrivateUse1() { + c10::call_once(thp_init, [&] { + if (isPrivateUse1HooksRegistered()) { + at::GetPrivateUse1HooksInterface()->initPrivateUse1(); + } + }); + } + static const at::cuda::NVRTC& getNVRTC() { + return detail::getCUDAHooks().nvrtc(); + } + + static bool setFlushDenormal(bool on); + + // NB: This method is *purely* whether or not a user requested + // that CuDNN was enabled, it doesn't actually say anything about + // whether or not CuDNN is actually usable. 
Use cudnn_is_acceptable
+  // to test this instead
+  bool userEnabledCuDNN() const;
+  void setUserEnabledCuDNN(bool e);
+  bool userEnabledMkldnn() const;
+  void setUserEnabledMkldnn(bool e);
+  bool benchmarkCuDNN() const;
+  void setBenchmarkCuDNN(bool);
+  int benchmarkLimitCuDNN() const;
+  void setBenchmarkLimitCuDNN(int);
+  bool deterministicCuDNN() const;
+  void setDeterministicCuDNN(bool);
+  bool userEnabledNNPACK() const;
+  void setUserEnabledNNPACK(bool e);
+
+  // Note [Disabling Fused SDP Kernels]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Flash and Memory Efficient SDP kernels are enabled by default.
+  // However, they can be disabled by setting
+  // at::globalContext().setUserEnabledFlashSDP(false) flag.
+  // This is useful for debugging purposes. For example, if you want to
+  // compare the performance of the flash SDP kernels with the unfused
+  // kernel, you can disable the flash SDP kernels. By disabling
+  // the math SDP kernel, you can force your code to use flash kernels.
+  // The math SDP kernel can be disabled by setting
+  // at::globalContext().setUserEnabledMathSDP(false) flag.
+  void setSDPUseFlash(bool);
+  bool userEnabledFlashSDP() const;
+
+  void setSDPUseMemEfficient(bool);
+  bool userEnabledMemEfficientSDP() const;
+
+  void setSDPUseMath(bool);
+  bool userEnabledMathSDP() const;
+
+  void setSDPUseCuDNN(bool);
+  bool userEnabledCuDNNSDP() const;
+
+  at::LinalgBackend linalgPreferredBackend() const;
+  void setLinalgPreferredBackend(at::LinalgBackend);
+
+  // Note [Enabling Deterministic Operations]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Operations in PyTorch that normally act nondeterministically, but have an
+  // alternate deterministic implementation, should satisfy the following
+  // requirements:
+  //
+  // * Include this comment: "See Note [Enabling Deterministic Operations]"
+  //
+  // * Check the value of `at::globalContext().deterministicAlgorithms()` to
+  //   toggle between nondeterministic and deterministic implementations.
+  //
+  // * Have an entry in the list of PyTorch operations that toggle between
+  //   nondeterministic and deterministic implementations, in the docstring of
+  //   `use_deterministic_algorithms()` in torch/__init__.py
+  //
+  // `example_func()` below shows an example of toggling between
+  // nondeterministic and deterministic implementations:
+  //
+  //   void example_func() {
+  //     // See Note [Enabling Deterministic Operations]
+  //     if (at::globalContext().deterministicAlgorithms()) {
+  //       example_func_deterministic();
+  //     } else {
+  //       example_func_nondeterministic();
+  //     }
+  //   }
+
+  bool deterministicAlgorithms() const;
+  bool deterministicAlgorithmsWarnOnly() const;
+  void setDeterministicAlgorithms(bool, bool);
+  bool deterministicFillUninitializedMemory() const;
+  void setDeterministicFillUninitializedMemory(bool);
+
+  // Note [Writing Nondeterministic Operations]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Operations in PyTorch that act nondeterministically and do not have an
+  // alternate deterministic implementation should satisfy the following
+  // requirements:
+  //
+  // * Include this comment: "See Note [Writing Nondeterministic Operations]"
+  //
+  // * Include a comment explaining why the operation is nondeterministic.
+  //
+  // * Throw an error when `Context::deterministicAlgorithms()` is true. Most
+  //   of the time, this should be accomplished by calling
+  //   `at::globalContext().alertNotDeterministic()`.
However, if the + // nondeterministic behavior is caused by the CuBLAS workspace + // configuration in CUDA >= 10.2, + // `at::globalContext().alertCuBLASConfigNotDeterministic()` should be + // called instead (in this case, a comment explaining why the operation is + // nondeterministic is not necessary). See below for details on these + // methods. + // + // * Have an entry in the list of nondeterministic PyTorch operations in the + // docstring of `use_deterministic_algorithms()` in torch/__init__.py + // + // * Have a test function in `test/test_torch.py` whose name begins with + // `test_nondeterministic_alert_`. Alternatively, if CuBLAS workspace + // configuration is the reason for nondeterminism, the operation should be + // included in the `test_cublas_config_nondeterministic_alert` test. Any new + // tests should ideally follow a pattern similar to the existing ones. + // + // `example_func()` below shows an example of the comments and error-throwing + // code for a nondeterministic operation: + // + // void example_func() { + // // See Note [Writing Nondeterministic Operations] + // // Nondeterministic because + // at::globalContext().alertNondeterministic("example_func"); + // ... + // } + + // Throws an error if `Context::deterministicAlgorithms()` is true + static void alertNotDeterministic(c10::string_view const& caller); + + // Throws an error if `Context::deterministicAlgorithms()` is true, CUDA + // >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or + // ":4096:8". For more details: + // https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility + void alertCuBLASConfigNotDeterministic() const; + + void setFloat32MatmulPrecision(const std::string& s); + bool allowTF32CuDNN() const; + void setAllowTF32CuDNN(bool); + bool allowTF32CuBLAS() const; + void setAllowTF32CuBLAS(bool); + Float32MatmulPrecision float32MatmulPrecision() const; + void setFloat32MatmulPrecision(Float32MatmulPrecision p); + bool allowFP16ReductionCuBLAS() const; + void setAllowFP16ReductionCuBLAS(bool); + bool allowBF16ReductionCuBLAS() const; + void setAllowBF16ReductionCuBLAS(bool); + at::QEngine qEngine() const; + void setQEngine(at::QEngine e); + static const std::vector& supportedQEngines(); + static bool isXNNPACKAvailable(); + void setCheckSparseTensorInvariants(bool e); + bool checkSparseTensorInvariants() const; + // This method is used to release the original weight after pre-packing. + // It should be called once before loading/running the model. + // NB: By default it is set to true for mobile builds. 
+ void setReleaseWeightsWhenPrepacking(bool e); + bool releaseWeightsWhenPrepacking() const; + + void setDisplayVmapFallbackWarnings(bool enabled); + bool areVmapFallbackWarningsEnabled() const; + + void setDefaultMobileCPUAllocator(); + void unsetDefaultMobileCPUAllocator(); + bool allowFP16ReductionCPU() const; + void setAllowFP16ReductionCPU(bool); + + private: + void initCUDAIfNeeded(c10::DeviceType p) { + if (p == c10::DeviceType::CUDA) { + lazyInitCUDA(); + } + } + void initHIPIfNeeded(c10::DeviceType p) { + if (p == c10::DeviceType::HIP) { + lazyInitHIP(); + } + } + void initXPUIfNeeded(c10::DeviceType p) { + if (p == c10::DeviceType::XPU) { + lazyInitXPU(); + } + } + static bool checkCuBLASConfigDeterministic(); + c10::once_flag thc_init; + c10::once_flag thh_init; + c10::once_flag thx_init; + c10::once_flag thp_init; + bool enabled_cudnn = true; + bool deterministic_cudnn = false; + bool _deterministic_algorithms = false; + bool _deterministic_algorithms_warn_only = false; + bool _deterministic_fill_uninitialized_memory = true; + bool enabled_flashSDP = true; + bool enabled_mem_efficientSDP = true; + bool enabled_mathSDP = true; + bool enabled_cudnnSDP = false; +#ifdef USE_ROCM + bool benchmark_cudnn = true; +#else + bool benchmark_cudnn = false; +#endif + Float32MatmulPrecision float32_matmul_precision = + c10::utils::check_env("TORCH_ALLOW_TF32_CUBLAS_OVERRIDE") == true + ? at::Float32MatmulPrecision::HIGH + : at::Float32MatmulPrecision::HIGHEST; + int benchmark_limit_cudnn = 10; + bool allow_tf32_cudnn = true; + bool allow_fp16_reduction_cublas = true; + bool allow_bf16_reduction_cublas = true; + bool enabled_mkldnn = true; + bool enabled_nnpack = true; + at::LinalgBackend linalg_preferred_backend = + c10::utils::check_env("TORCH_LINALG_PREFER_CUSOLVER") == true + ? 
at::LinalgBackend::Cusolver + : at::LinalgBackend::Default; +#ifdef C10_MOBILE + bool release_original_weights = true; +#else + bool release_original_weights = false; +#endif + bool display_vmap_fallback_warnings_ = false; + c10::optional quantized_engine = c10::nullopt; + bool enable_sparse_tensor_invariant_checks = false; + bool allow_fp16_reduction_cpu = false; + + Allocator* prev_allocator_ptr_{nullptr}; +}; + +TORCH_API Context& globalContext(); + +static inline void init() { + globalContext(); +} + +TORCH_API Allocator* getCPUAllocator(); + +static inline DeprecatedTypeProperties& getDeprecatedTypeProperties( + Backend p, + ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + p, s); +} + +static inline DeprecatedTypeProperties& CPU(ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + Backend::CPU, s); +} + +static inline DeprecatedTypeProperties& CUDA(ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + Backend::CUDA, s); +} + +static inline DeprecatedTypeProperties& HIP(ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + Backend::HIP, s); +} + +static inline DeprecatedTypeProperties& MPS(ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + Backend::MPS, s); +} + +static inline bool hasCUDA() { + return globalContext().hasCUDA(); +} + +static inline bool hasMTIA() { + return globalContext().hasMTIA(); +} + +static inline bool hasHIP() { + return globalContext().hasHIP(); +} + +static inline bool hasIPU() { + return globalContext().hasIPU(); +} + +static inline bool hasXLA() { + return globalContext().hasXLA(); +} + +static inline bool hasMPS() { + return globalContext().hasMPS(); +} + +static inline bool hasORT() { + return globalContext().hasORT(); +} + +static inline bool hasXPU() { + return globalContext().hasXPU(); +} + +// Despite its name, this function returns the number of *CUDA* GPUs. +static inline size_t getNumGPUs() { + // WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS + // FUNCTION. If you are interested in interrogating the number of + // devices for a specific device type, add that function to the + // relevant library (e.g., similar to at::cuda::device_count()) + if (hasCUDA() && hasHIP()) { + throw std::runtime_error( + "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades " + "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually " + "means HIP. Rebuild PyTorch with one or the other disabled."); + } else if (hasCUDA()) { + return detail::getCUDAHooks().getNumGPUs(); + } else if (hasHIP()) { + return detail::getHIPHooks().getNumGPUs(); + } else { + return 0; + } +} + +static inline bool hasOpenMP() { + return globalContext().hasOpenMP(); +} + +static inline bool hasMKL() { + return globalContext().hasMKL(); +} + +static inline bool hasLAPACK() { + return globalContext().hasLAPACK(); +} + +static inline bool hasMAGMA() { + return globalContext().hasMAGMA(); +} + +static inline bool hasMKLDNN() { + return globalContext().hasMKLDNN(); +} + +static inline void manual_seed(uint64_t seed) { + auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU); + { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(gen.mutex()); + gen.set_current_seed(seed); + } + // NB: Sometimes we build with CUDA, but we don't have any GPUs + // available. 
In that case, we must not seed CUDA; it will fail! + const auto cuda_num_gpus = detail::getCUDAHooks().getNumGPUs(); + if (hasCUDA() && cuda_num_gpus > 0) { + for (const auto i : c10::irange(cuda_num_gpus)) { + auto cuda_gen = globalContext().defaultGenerator( + Device(at::kCUDA, static_cast(i))); + { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(cuda_gen.mutex()); + cuda_gen.set_current_seed(seed); + } + } + } + + const auto xpu_num_gpus = detail::getXPUHooks().getNumGPUs(); + if (hasXPU() && xpu_num_gpus) { + for (const auto i : c10::irange(xpu_num_gpus)) { + auto xpu_gen = globalContext().defaultGenerator( + Device(at::kXPU, static_cast(i))); + { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(xpu_gen.mutex()); + xpu_gen.set_current_seed(seed); + } + } + } + + if (hasMPS()) { + auto mps_gen = globalContext().defaultGenerator(c10::DeviceType::MPS); + // See Note [Acquire lock when using random generators] + std::lock_guard lock(mps_gen.mutex()); + mps_gen.set_current_seed(seed); + } +} + +// When the global flag `allow_tf32` is set to true, cuBLAS handles are +// automatically configured to use math mode CUBLAS_TF32_TENSOR_OP_MATH. +// For some operators, such as addmv, TF32 offers no performance improvement +// but causes precision loss. To help this case, this class implements +// a RAII guard that can be used to quickly disable TF32 within its scope. +// +// Usage: +// NoTF32Guard disable_tf32; +struct TORCH_API NoTF32Guard { + NoTF32Guard(); + ~NoTF32Guard(); + static bool should_disable_tf32(); + + private: + bool changed = false; +}; + +struct TORCH_API ROCmBackwardPassGuard { + ROCmBackwardPassGuard(); + ~ROCmBackwardPassGuard(); + static bool is_backward_pass(); +}; + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h new file mode 100644 index 0000000000000000000000000000000000000000..b35c9657527d84c022ffeea845b9b37fdd17ee70 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include +#include + +// this convertor will: +// 1) take a Tensor object and wrap it in the DLPack tensor +// 2) take a dlpack tensor and convert it to the ATen Tensor + +namespace at { + +TORCH_API ScalarType toScalarType(const DLDataType& dtype); +TORCH_API DLManagedTensor* toDLPack(const Tensor& src); +TORCH_API Tensor fromDLPack(DLManagedTensor* src); +C10_DEPRECATED_MESSAGE("Please migrate to a non-const variant") +inline Tensor fromDLPack(const DLManagedTensor* src) { + return fromDLPack(const_cast(src)); +} +TORCH_API Tensor +fromDLPack(DLManagedTensor* src, std::function deleter); +TORCH_API DLDataType getDLDataType(const Tensor& t); +TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& device_id); + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Device.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Device.h new file mode 100644 index 0000000000000000000000000000000000000000..6c515580363c9e9aab3ee322678fd0cb0283aec8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Device.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h new file mode 100644 index 
0000000000000000000000000000000000000000..c3e800c7e07c65c4289baa46ba29d9b61cc5dd20 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +#include +#include + +// This file defines the top level Accelerator concept for PyTorch. +// A device is an accelerator per the definition here if: +// - It is mutually exclusive with all other accelerators +// - It performs asynchronous compute via a Stream/Event system +// - It provides a set of common APIs as defined by AcceleratorHooksInterface +// +// As of today, accelerator devices are (in no particular order): +// CUDA, MTIA, PrivateUse1 +// We want to add once all the proper APIs are supported and tested: +// HIP, MPS, XPU + +namespace at { + +// Ensures that only one accelerator is available (at +// compile time if possible) and return it. +// When checked is true, the returned optional always has a value. +TORCH_API std::optional getAccelerator(bool checked = false); + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..adc7f3efdbb6a0e7d12fd6fcd0117089a83e8e85 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include +#include // TensorList whyyyyy + +namespace at { + +// Are you here because you're wondering why DeviceGuard(tensor) no +// longer works? For code organization reasons, we have temporarily(?) +// removed this constructor from DeviceGuard. The new way to +// spell it is: +// +// OptionalDeviceGuard guard(device_of(tensor)); + +/// Return the Device of a Tensor, if the Tensor is defined. +inline c10::optional device_of(const Tensor& t) { + if (t.defined()) { + return c10::make_optional(t.device()); + } else { + return c10::nullopt; + } +} + +inline c10::optional device_of(const c10::optional& t) { + return t.has_value() ? device_of(t.value()) : c10::nullopt; +} + +/// Return the Device of a TensorList, if the list is non-empty and +/// the first Tensor is defined. (This function implicitly assumes +/// that all tensors in the list have the same device.) 
+inline c10::optional device_of(ITensorListRef t) { + if (!t.empty()) { + return device_of(t.front()); + } else { + return c10::nullopt; + } +} + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Dimname.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Dimname.h new file mode 100644 index 0000000000000000000000000000000000000000..71836a9e25d3d82d9cd5024b2f33e147e14bf87e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Dimname.h @@ -0,0 +1 @@ +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h new file mode 100644 index 0000000000000000000000000000000000000000..8db6be6a643c8cb60cab8487478f9a2f0c817d8b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h @@ -0,0 +1,30 @@ +#include + +// Broadcasting utilities for working with TensorBase +namespace at { +namespace internal { +TORCH_API TensorBase expand_slow_path(const TensorBase& self, IntArrayRef size); +} // namespace internal + +inline c10::MaybeOwned expand_size( + const TensorBase& self, + IntArrayRef size) { + if (size.equals(self.sizes())) { + return c10::MaybeOwned::borrowed(self); + } + return c10::MaybeOwned::owned( + at::internal::expand_slow_path(self, size)); +} +c10::MaybeOwned expand_size(TensorBase&& self, IntArrayRef size) = + delete; + +inline c10::MaybeOwned expand_inplace( + const TensorBase& tensor, + const TensorBase& to_expand) { + return expand_size(to_expand, tensor.sizes()); +} +c10::MaybeOwned expand_inplace( + const TensorBase& tensor, + TensorBase&& to_expand) = delete; + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Formatting.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Formatting.h new file mode 100644 index 0000000000000000000000000000000000000000..392e2a27b0130c7ba55621d6ac1d6fd4e989db02 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Formatting.h @@ -0,0 +1 @@ +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..c003fc22babdc46161cbd78dd9b293777bdb549c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h @@ -0,0 +1,126 @@ +#pragma once + +#include + +namespace at::functionalization { + +// See Note [Functionalization Pass In Core] + +// ViewMeta is a class used by the functionalization pass to navigate between +// a base tensor and a view tensor. +// For example, if I call `b = a.view1(...)` +// the functionalization pass will generate and store a ViewMeta on b that looks +// like: +// +// ViewMeta( +// [](const Tensor& base, int64_t mutated_view_idx) { +// return base.view1(...); +// }, +// [](const at::Tensor& base, const at::Tensor& mutated_view, +// int64_t mutated_view_idx) -> at::Tensor { +// return at::functionalization::impl::view1_inverse(base, mutated_view, +// ...); +// } +// +// The forward_fn lambda describes how to replay view1 on a tensor. +// +// The reverse_fn lambda describes how, given a tensor that is already a view, +// how to get the corresponding base tensor. See Note [Functionalization Pass: +// View Inverses] for details. 
+struct ViewMeta { + ViewMeta( + std::function forward, + std::function reverse, + bool is_multi_output = false, + int64_t out_idx = 0) + : forward_fn(std::move(forward)), + reverse_fn(std::move(reverse)), + out_index(out_idx), + is_multi_output(is_multi_output) {} + + std::function forward_fn; + std::function reverse_fn; + // See Note [out_idx in ViewMeta] + int64_t out_index; + + // Tells us if this is a multi-output view + bool is_multi_output; + + // Returns a copy of the current ViewMeta, if out_idx matches the current + // out_index. Otherwise, returns a new ViewMeta with the same forward/reverse + // functions, but a new out index. + ViewMeta to_out_idx(int64_t out_idx); +}; + +// FunctionalStorageImpl is a subclass of StorageImpl used by the +// functionalization pass. It has no underlying data (similar to meta storage). +// It also knows how to reflect mutations to tensors in the absence of a valid +// data pointer. +// +// A storage represents the state shared by (potentially multiple) views of the +// same tensor. For example, in the following code: +// +// b = a.view1(...) +// c = b.view2(...) +// b.add_(1) +// --> storage.add_update(b, {view1_meta}) +// +// The call to add_(1) will result in a call to alias.add_update(b, +// {view1_meta}), queueing up the mutation from b onto the alias. Later, suppose +// c is used in an expression (e.g. you try to print c, or pass it to an +// operator). Doing so will involve "syncing" c. First we apply any pending +// updates to the alias, and then we regenerate c by replaying its views off of +// the updated alias. E.g: +// +// print(str(c)) +// --> c.sync_() +// --> alias.apply_updates() // after this, the alias will be updated to +// reflect the mutation to b +struct TORCH_API FunctionalStorageImpl : public c10::StorageImpl { + public: + struct Update { + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const at::Tensor new_val; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const std::vector view_metas; + }; + + explicit FunctionalStorageImpl(const Tensor& value); + + void add_update( + const Tensor& updated_val, + const std::vector& view_metas); + bool apply_updates(); + const Tensor& base() { + return base_; + } + size_t generation() const { + return generation_; + } + void freeze() { + frozen_ = true; + } + + ~FunctionalStorageImpl() override = default; + + private: + // NB: base_ should always point to a tensor BELOW the current + // functionalization layer. This is mainly to avoid reference cycles. e.g. + // given `b = a.view(...)` Both a.storage_ and b.storage_ are a + // FunctionStorageImpl containing an Walualias, with contains a Tensor + // `base_`. In this case (where a and b are FunctionalTensorWrapper's), base_ + // should point not to a, but to a's unwrapped value, a.value_` See Note + // [Functionalization: Walualias Removal] for a diagram that shows this + // visually. + at::Tensor base_; + std::vector updates_; + // generation_ gets incremented every time a mutation is queued onto the + // alias. It is used to determine if a given tensor is "up to date", or if it + // needs to be regenerated from the alias. + size_t generation_ = 0; + // If frozen, no more mutations are allowed on this storage. Once frozen, a + // storage cannot be unfrozen. 
+ bool frozen_ = false; +}; + +} // namespace at::functionalization diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Functions.h new file mode 100644 index 0000000000000000000000000000000000000000..8327d00ece5c0f2b2c1e81f88e54fa0c76d9e97d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Functions.h @@ -0,0 +1,1427 @@ +#pragma once + +// @generated by torchgen/gen.py from Functions.h + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from and \ + see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS] +// +// In ATen, certain generated headers files include the definitions of +// every single operator in PyTorch. Unfortunately this means every +// time an operator signature is updated or changed in +// native_functions.yaml, you (and every other PyTorch developer) need +// to recompile every source file that includes any of these headers. +// +// To break up these header dependencies, and improve incremental +// build times for all PyTorch developers. These headers are split +// into per-operator headers in the `ATen/ops` folder. This limits +// incremental builds to only changes to methods of `Tensor`, or files +// that use the specific operator being changed. With `at::sum` as an +// example, you should include +// +// // instead of ATen/Functions.h +// // instead of ATen/NativeFunctions.h +// // instead of ATen/Operators.h +// // instead of ATen/CPUFunctions.h +// +// However, even if you're careful to use this in your own code. +// `Functions.h` might be included indirectly through another header +// without you realising. To avoid this, you can add +// +// #define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// +// to the top of your source file. This way any time the non-specific +// headers are included, the compiler will error out. +// +// Also, be aware that `ops` are not available in all build +// configurations (namely fb-internal) so you must guard these +// includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g. 
+// +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #else +// #include +// #endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { + + + +// Special C++ only overloads for std()-like functions (See gh-40287) +// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef +// So, for example std(0) would select the std(unbiased=False) overload +TORCH_API inline Tensor var(const Tensor& self, int dim) { + return at::var(self, IntArrayRef{dim}); +} +TORCH_API inline std::tuple var_mean(const Tensor& self, int dim) { + return at::var_mean(self, IntArrayRef{dim}); +} +TORCH_API inline Tensor std(const Tensor& self, int dim) { + return at::std(self, IntArrayRef{dim}); +} +TORCH_API inline std::tuple std_mean(const Tensor& self, int dim) { + return at::std_mean(self, IntArrayRef{dim}); +} + +inline int64_t numel(const Tensor& tensor) { + return tensor.numel(); +} + +inline int64_t size(const Tensor& tensor, int64_t dim) { + return tensor.size(dim); +} + +inline int64_t stride(const Tensor& tensor, int64_t dim) { + return tensor.stride(dim); +} + +inline bool is_complex(const Tensor& tensor) { + return tensor.is_complex(); +} + +inline bool is_floating_point(const Tensor& 
tensor) { + return tensor.is_floating_point(); +} + +inline bool is_signed(const Tensor& tensor) { + return tensor.is_signed(); +} + +inline bool is_inference(const Tensor& tensor) { + return tensor.is_inference(); +} + +inline bool _is_zerotensor(const Tensor& tensor) { + return tensor._is_zerotensor(); +} + +inline bool is_conj(const Tensor& tensor) { + return tensor.is_conj(); +} + +inline Tensor conj(const Tensor& tensor) { + return tensor.conj(); +} + +inline bool is_neg(const Tensor& tensor) { + return tensor.is_neg(); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InferSize.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InferSize.h new file mode 100644 index 0000000000000000000000000000000000000000..111c7eb8f5fc7cd20a3eb812450324788608c011 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InferSize.h @@ -0,0 +1,87 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +// Infers the size of a dim with size -1, if it exists. Also checks that new +// shape is compatible with the number of elements. +// +// templated to handle std::vector and DimVector use cases, see +// below +// +template +inline void infer_size_impl( + InputArrayRef shape, + NumelType numel, + ResultVec& res) { + NumelType newsize = 1; + // N.B. this is an index, not a sym dim! + auto infer_dim = c10::optional(); + for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) { + if (shape[dim] == -1) { + if (infer_dim) { + throw std::runtime_error("only one dimension can be inferred"); + } + infer_dim = dim; + } else if (shape[dim] >= 0) { + newsize *= shape[dim]; + } else { + AT_ERROR("invalid shape dimension ", shape[dim]); + } + } + + if (numel == newsize || (infer_dim && newsize > 0 && numel % newsize == 0)) { + if (infer_dim) { + // We have a degree of freedom here to select the dimension size; follow + // NumPy semantics and just bail. However, a nice error message is needed + // because users often use `view` as a way to flatten & unflatten + // dimensions and will otherwise be confused why + // empty_tensor.view( 0, 0) + // works yet + // empty_tensor.view(-1, 0) + // doesn't. 
+ TORCH_CHECK( + newsize != 0, + "cannot reshape tensor of 0 elements into shape ", + shape, + " because the unspecified dimension size -1 can be any " + "value and is ambiguous"); + res[*infer_dim] = numel / newsize; + } + return; + } + + std::ostringstream ss; + ss << "shape '" << shape << "' is invalid for input of size " << numel; + throw std::runtime_error(ss.str()); +} + +inline std::vector infer_size(IntArrayRef shape, int64_t numel) { + auto res = shape.vec(); + infer_size_impl(shape, numel, res); + return res; +} + +inline at::DimVector infer_size_dv(IntArrayRef shape, int64_t numel) { + auto res = at::DimVector(shape); + infer_size_impl(shape, numel, res); + return res; +} + +inline at::SymDimVector infer_size_dv( + c10::SymIntArrayRef shape, + c10::SymInt numel) { + auto res = at::SymDimVector(shape); + infer_size_impl( + shape, std::move(numel), res); + return res; +} + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..d6914552eb0df70b18077c6ef10a55149790b5d6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace at { + +// Represents the initial TensorOptions, before the "defaults" are ever changed. +// This is designed to be used in library code, where the explicit devices, +// dtypes, etc. are known. NOTE: this is not a stable API. +inline TensorOptions initialTensorOptions() { + return TensorOptions(kCPU).dtype(kFloat).layout(kStrided).requires_grad( + false); +} + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h new file mode 100644 index 0000000000000000000000000000000000000000..4617afd0b72c7ce286e61a4d1abe2cc89743024c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +#include +#include + +namespace at { + +enum class LinalgBackend : int8_t { Default, Cusolver, Magma }; + +inline std::string LinalgBackendToString(at::LinalgBackend backend) { + switch (backend) { + case LinalgBackend::Default: + return "at::LinalgBackend::Default"; + case LinalgBackend::Cusolver: + return "at::LinalgBackend::Cusolver"; + case LinalgBackend::Magma: + return "at::LinalgBackend::Magma"; + default: + TORCH_CHECK(false, "Unknown linalg backend"); + } +} + +inline std::ostream& operator<<( + std::ostream& stream, + at::LinalgBackend backend) { + return stream << LinalgBackendToString(backend); +} + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..f4a30edef623956d5072737336bfca6da5cb2bb4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h @@ -0,0 +1,139 @@ +#pragma once + +#include +#include + +namespace at { + +enum MappedAllocatorModes { + ALLOCATOR_MAPPED_SHARED = 1, + ALLOCATOR_MAPPED_SHAREDMEM = 2, + ALLOCATOR_MAPPED_EXCLUSIVE = 4, + ALLOCATOR_MAPPED_NOCREATE = 8, + ALLOCATOR_MAPPED_KEEPFD = 16, + ALLOCATOR_MAPPED_FROMFD = 32, + ALLOCATOR_MAPPED_UNLINK = 64 +}; + +// Sentinel value/type to help distinguish 
the file descriptor constructor from +// the non-file descriptor constructor +enum WithFd { WITH_FD }; + +TORCH_API std::string NewProcessWideShmHandle(); + +class TORCH_API MapAllocator { + public: + MapAllocator(c10::string_view filename, int flags, size_t size); + MapAllocator( + WithFd, + c10::string_view filename, + int fd, + int flags, + size_t size); + MapAllocator(const MapAllocator&) = delete; + MapAllocator& operator=(const MapAllocator&) = delete; + MapAllocator(MapAllocator&&) = delete; + MapAllocator& operator=(MapAllocator&&) = delete; + + const char* filename() const { + return filename_.c_str(); + } + int fd() const { +#ifdef _WIN32 + TORCH_CHECK(false, "MapAllocator::fd() is unsupported on Windows"); +#else + return fd_; +#endif + } + ptrdiff_t size() const { + return size_; + } + // Return a pointer to the actual data for this allocator + // (in the case of the refcounted allocator, this is offset + // from the base pointer.) + virtual void* data() const { + return base_ptr_; + } + + static MapAllocator* fromDataPtr(const at::DataPtr&); + static at::DataPtr makeDataPtr( + c10::string_view filename, + int flags, + size_t size, + size_t* actual_size_out); + static at::DataPtr makeDataPtr( + WithFd, + const char* filename, + int fd, + int flags, + size_t size, + size_t* actual_size_out); + + // Closes the data. Helps us avoid destructor shenanigans + virtual void close(); + + // This is very dangerous. You have to redefine this destructor for each + // subclass + virtual ~MapAllocator(); + + protected: + bool closed_ = false; + std::string filename_; + int flags_ = 0; + ptrdiff_t size_; /* mapped size */ +#ifdef _WIN32 + void* handle_; + void* event_; + std::string eventname_; +#else + int fd_ = -1; +#endif + void* base_ptr_ = nullptr; +}; + +// Base-from-member idiom +struct TORCH_API RefcountedMapAllocatorArgCheck { + RefcountedMapAllocatorArgCheck(int flags); +}; + +class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck, + public MapAllocator { + public: + RefcountedMapAllocator(const char* filename, int flags, size_t size); + RefcountedMapAllocator( + WithFd, + const char* filename, + int fd, + int flags, + size_t size); + + static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&); + static at::DataPtr makeDataPtr( + const char* filename, + int flags, + size_t size, + size_t* actual_size_out); + static at::DataPtr makeDataPtr( + WithFd, + const char* filename, + int fd, + int flags, + size_t size, + size_t* actual_size_out); + + void* data() const override; + + void incref(); + int decref(); + void close() override; + + ~RefcountedMapAllocator() override { + RefcountedMapAllocator::close(); + } + + protected: + void checkFlags(); + void initializeAlloc(); +}; + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h new file mode 100644 index 0000000000000000000000000000000000000000..901efff4cc23fa3d1a4483cb330325431ac95f1e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h @@ -0,0 +1,109 @@ +#pragma once +#include +#include + +#include + +namespace at { +/// MatrixRef - Like an ArrayRef, but with an extra recorded strides so that +/// we can easily view it as a multidimensional array. +/// +/// Like ArrayRef, this class does not own the underlying data, it is expected +/// to be used in situations where the data resides in some other buffer. 
+/// +/// This is intended to be trivially copyable, so it should be passed by +/// value. +/// +/// For now, 2D only (so the copies are actually cheap, without having +/// to write a SmallVector class) and contiguous only (so we can +/// return non-strided ArrayRef on index). +/// +/// P.S. dimension 0 indexes rows, dimension 1 indexes columns +template +class MatrixRef { + public: + typedef size_t size_type; + + private: + /// Underlying ArrayRef + ArrayRef arr; + + /// Stride of dim 0 (outer dimension) + size_type stride0; + + // Stride of dim 1 is assumed to be 1 + + public: + /// Construct an empty Matrixref. + /*implicit*/ MatrixRef() : arr(nullptr), stride0(0) {} + + /// Construct an MatrixRef from an ArrayRef and outer stride. + /*implicit*/ MatrixRef(ArrayRef arr, size_type stride0) + : arr(arr), stride0(stride0) { + TORCH_CHECK( + arr.size() % stride0 == 0, + "MatrixRef: ArrayRef size ", + arr.size(), + " not divisible by stride ", + stride0) + } + + /// @} + /// @name Simple Operations + /// @{ + + /// empty - Check if the matrix is empty. + bool empty() const { + return arr.empty(); + } + + const T* data() const { + return arr.data(); + } + + /// size - Get size a dimension + size_t size(size_t dim) const { + if (dim == 0) { + return arr.size() / stride0; + } else if (dim == 1) { + return stride0; + } else { + TORCH_CHECK( + 0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1"); + } + } + + size_t numel() const { + return arr.size(); + } + + /// equals - Check for element-wise equality. + bool equals(MatrixRef RHS) const { + return stride0 == RHS.stride0 && arr.equals(RHS.arr); + } + + /// @} + /// @name Operator Overloads + /// @{ + ArrayRef operator[](size_t Index) const { + return arr.slice(Index * stride0, stride0); + } + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + std::enable_if_t, MatrixRef>& operator=( + U&& Temporary) = delete; + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + std::enable_if_t, MatrixRef>& operator=( + std::initializer_list) = delete; +}; + +} // end namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h new file mode 100644 index 0000000000000000000000000000000000000000..d41324249b39bc4f061a9cca62799057ac76ec43 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +namespace c10 { +struct TensorImpl; +} + +namespace at { +class TensorBase; + +// MemOverlap: Whether or not there is memory overlap +// +// No: Absolutely no memory overlap +// Yes: Absolutely yes memory overlap +// TooHard: There might be memory overlap, but it was too expensive to compute. +// +// NB: Please update the python test for these if you renumber them. 
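+//
+// For intuition, an illustrative sketch (not authoritative; the exact
+// classification lives in the implementation of has_internal_overlap,
+// declared further below):
+//
+//   auto a = at::rand({4, 4});              // contiguous          -> MemOverlap::No
+//   auto b = a.expand({2, 4, 4});           // stride-0 dimension  -> MemOverlap::Yes
+//   auto c = a.as_strided({4, 4}, {1, 2});  // overlap possible but
+//                                           // expensive to verify -> MemOverlap::TooHard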
+enum class MemOverlap { No, Yes, TooHard }; + +enum class MemOverlapStatus { Full, Partial, No, TooHard }; + +TORCH_API MemOverlap has_internal_overlap(const TensorBase& t); +TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t); + +TORCH_API void assert_no_internal_overlap(const TensorBase& t); +TORCH_API void assert_no_internal_overlap(c10::TensorImpl* t); + +TORCH_API MemOverlapStatus +get_overlap_status(const TensorBase& a, const TensorBase& b); +TORCH_API MemOverlapStatus +get_overlap_status(const c10::TensorImpl* a, const c10::TensorImpl* b); + +TORCH_API void assert_no_partial_overlap( + const TensorBase& a, + const TensorBase& b); +void assert_no_partial_overlap(c10::TensorImpl* a, c10::TensorImpl* b); + +TORCH_API void assert_no_overlap(const TensorBase& a, const TensorBase& b); +TORCH_API void assert_no_overlap(c10::TensorImpl* a, c10::TensorImpl* b); + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..2111758cb07be2a4ab5bfe932688ed394e53d1e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h @@ -0,0 +1,324 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. 
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..c1443b7eaa01b4d3215e14c478a5c38195e0a5c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h @@ -0,0 +1,215 @@ +#pragma once +#include +#include +#include + +#include +#include +#include + +namespace at { + +using 
NameVector = SmallVector; + +inline bool has_names(const ITensorListRef& tensors) { + return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) { + return t.has_names(); + }); +} + +// Converts dim to an positional index. Errors if `dim` cannot be used to +// refer to any dimension of tensor. +TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim); +TORCH_API std::vector dimnames_to_positions( + const Tensor& tensor, + DimnameList dims); + +// Unifies two DimnameList to produce a third. This is useful for implementing +// the named inference rule for binary broadcasting operations like add. +// +// There are three main constraints: +// 1) Check matching: Names must match positionally from the right. +// 2) Check misaligned: If a name `n` is in `names`, then it must appear at +// the same index from the right in other. +// 3) The output names are obtained by unifying the names individually from the +// right. +TORCH_API std::vector unify_from_right( + DimnameList names, + DimnameList other, + const char* action = "broadcast"); + +[[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) { + TORCH_CHECK( + false, + op_name, + ": You passed a dimname (string) to this op in place of a dimension " + "index but it does not yet support this behavior. Please pass a dimension " + "index to work around this."); +} + +// [NOTE] Writing name inference rules +// +// Operators that support named tensors are either composed of operations that +// support named tensors or implement some name inference rule. An op that +// implements its own name inference rule generally looks like the following: +// +// Tensor op(...) { +// perform_shape_checks(...); +// # (1) +// auto maybe_outnames = compute_outnames(...); +// auto result = [&]() { +// NoNamesGuard guard; +// return op_impl(...); +// }(); +// # (2) +// propagate_names_if_nonempty(result, maybe_outnames); +// +// Each op has (1) a compute outnames step and (2) a propagate names step. +// +// compute_outnames is responsible for checking that input names match and +// determining what the output names should be. It returns either: +// - {} (if the inputs tensors are all unnamed) +// - non-empty outnames. +// +// propagate_names_if_nonempty propagates the outnames if they exist to the +// result tensors. +// +// The {} case is an optimization; if the user does not use named tensors they +// pay no perf cost for it. + +namespace namedinference { + +const Tensor& propagate_names_if_present_and_nonempty( + const Tensor& result, + c10::optional maybe_names, + bool validate_names = false); +// Propagates `names` to `result` if `names` is not empty. +// `names` can be empty; see [NOTE] Writing name inference rules +// If `names` is not empty, `names.size()` should equal `result.dim()`. +// When in doubt, use this overload instead of the others. +TORCH_API const Tensor& propagate_names_if_nonempty( + const Tensor& result, + DimnameList maybe_names, + bool validate_names = false); + +// Propagates `names` to `result`. Only use this if we are certain that there +// are names to propagate (that names is not empty). +TORCH_API const Tensor& propagate_names( + const Tensor& result, + DimnameList names, + bool validate_names = false); + +// Propagates all names from src to result. +TORCH_API void propagate_names(const Tensor& result, const Tensor& src); + +// Propagates all names except for those at the excluded_idxs. 
+TORCH_API void propagate_names_except( + const Tensor& result, + const Tensor& src, + IntArrayRef excluded_idxs); + +// Used for reduction ops that have a `keepdim` arg. +TORCH_API void propagate_names_for_reduction( + const Tensor& result, + const Tensor& src, + IntArrayRef excluded_idxs, + bool keepdim); + +TORCH_API void propagate_names_for_expand( + const Tensor& result, + const Tensor& self); + +TORCH_API std::vector compute_cat_outnames( + const MaterializedITensorListRef& tensors); + +TORCH_API std::vector compute_broadcast_outnames( + const Tensor& self, + const Tensor& other); + +TORCH_API std::vector broadcast_to_outnames( + const Tensor& tensor, + const Tensor& reference_tensor, + const char* op_name); + +TORCH_API std::vector compute_matmul_outnames( + const Tensor& self, + const Tensor& other); + +TORCH_API std::vector compute_cdist_outnames( + const Tensor& self, + const Tensor& other); + +TORCH_API std::vector compute_bmm_outnames( + const Tensor& result, + const Tensor& self, + const Tensor& other); + +TORCH_API std::vector compute_squeeze_outnames(const Tensor& tensor); +TORCH_API std::vector compute_squeeze_outnames( + const Tensor& tensor, + std::bitset dims); + +std::vector compute_diagonal_outnames( + const Tensor& tensor, + int64_t dim1, + int64_t dim2); + +// TensorImpl* overloads for Legacy TH/THC code. Use these sparingly. + +TORCH_API TensorImpl* propagate_names_if_nonempty( + TensorImpl* result, + DimnameList maybe_names, + bool validate_names = false); + +TORCH_API TensorImpl* propagate_names( + TensorImpl* result, + DimnameList names, + bool validate_names = false); + +TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src); + +TORCH_API inline void propagate_names( + const TensorBase& result, + DimnameList names, + bool validate_names = false) { + propagate_names(result.unsafeGetTensorImpl(), names, validate_names); +} + +TORCH_API inline void propagate_names_if_nonempty( + const TensorBase& result, + DimnameList names, + bool validate_names = false) { + propagate_names_if_nonempty( + result.unsafeGetTensorImpl(), names, validate_names); +} + +TORCH_API inline void propagate_names( + const TensorBase& result, + const TensorBase& src) { + propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl()); +} + +// result = m1 @ m2 + bias +TORCH_API std::vector propagate_names_for_addmm( + const Tensor& m1, + const Tensor& m2, + const Tensor& bias); + +TORCH_API std::vector propagate_names_for_addmv( + const Tensor& mat, + const Tensor& vec, + const Tensor& bias); + +TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2); + +TORCH_API std::vector compute_baddbmm_outnames( + const Tensor& result, + const Tensor& self, + const Tensor& other, + const Tensor& bias); + +TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other); + +} // namespace namedinference + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..0ad42ae816274117a276a9545e2057b5eb252a6c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h @@ -0,0 +1,283 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at::native { +struct NestedTensorImpl; +inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt); 
+int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor); + +struct TORCH_API NestedTensorImpl : public c10::TensorImpl { + explicit NestedTensorImpl( + Storage storage, + c10::DispatchKeySet key_set, + const caffe2::TypeMeta data_type, + at::Tensor nested_sizes, + at::Tensor nested_strides, + at::Tensor storage_offsets); + + explicit NestedTensorImpl( + const at::Tensor& buffer, + at::Tensor nested_sizes, + at::Tensor nested_strides, + at::Tensor storage_offsets); + // assume contiguous, `nested_strides` and `offsets` + // can be infered from `nested_sizes` + explicit NestedTensorImpl( + const at::Tensor& buffer, + const at::Tensor& nested_sizes); + + // This constructor is used creating view tensors from nested tensors + explicit NestedTensorImpl( + c10::TensorImpl::ImplType impl_type, + const at::Tensor& base_tensor, + at::Tensor nested_sizes, + at::Tensor nested_strides, + at::Tensor storage_offsets); + + // TODO: don't expose private implementation details like this; in + // particular, resizing this tensor will mess up our dim() and + // callers cannot fix it. + const Tensor& get_nested_sizes() const { + return nested_sizes_; + } + // TODO: don't expose private implementation details like this + const Tensor& get_nested_strides() const { + return nested_strides_; + } + const Tensor& get_storage_offsets() const { + return storage_offsets_; + } + // Returns nullopt if the ith dimension is irregular. The ith dimension + // of a NestedTensor is regular if the unbound tensors match in + // size at the (i-1)th dimension. + c10::optional opt_size(int64_t d) const; + + int64_t size(int64_t d) const { + c10::optional optional_size = this->opt_size(d); + TORCH_CHECK( + optional_size.has_value(), + "Given dimension ", + d, + " is irregular and does not have a size."); + return *optional_size; + } + /** + * Return a view of the nested tensor as a 1 dimensional contiguous tensor. + * + * The buffer tensor created by this function shares the same storage_impl as + * the original nested tensor, and therefore can be seen as a view. + * + * @return A newly constructed view tensor + */ + at::Tensor get_buffer() const { + TORCH_CHECK( + nested_tensor_impl_is_contiguous(this), + "NestedTensor must be contiguous to get buffer."); + return get_unsafe_storage_as_tensor(); + } + /** + * If possible use get_buffer() instead. This function returns the storage + * as a tensor directly, which is not safe to use in general. If using this + * function, The caller must ensure to account for nested_sizes, + * nested_strides and storage_offsets. 
+ * + * @return A newly constructed view tensor + */ + at::Tensor get_unsafe_storage_as_tensor() const { + auto buffer_key_set_ = generate_buffer_key_set(); + const auto buffer_size = get_buffer_size(); + auto buffer_tensor_impl = c10::make_intrusive( + c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_); + buffer_tensor_impl->set_sizes_contiguous( + c10::makeArrayRef(static_cast(buffer_size))); + return Tensor(buffer_tensor_impl); + } + + size_t get_buffer_size() const { + return storage_.nbytes() / data_type_.itemsize(); + } + + protected: + const char* tensorimpl_type_name() const override; + + // TODO: numel_custom and is_contiguous_custom can be profitably overridden + // with real implementations + int64_t numel_custom() const override; + c10::SymInt sym_numel_custom() const override; + bool is_contiguous_custom(MemoryFormat) const override; + int64_t size_custom(int64_t d) const override { + return this->size(d); + } + c10::SymInt sym_size_custom(int64_t d) const override { + return c10::SymInt{this->size(d)}; + } + IntArrayRef sizes_custom() const override; + c10::SymIntArrayRef sym_sizes_custom() const override; + IntArrayRef strides_custom() const override; + c10::SymIntArrayRef sym_strides_custom() const override; + + // this one is real + int64_t dim_custom() const override; + + c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override; + + c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override; + + void shallow_copy_from(const c10::intrusive_ptr& impl) override { + copy_tensor_metadata( + /*src_impl=*/impl.get(), + /*dest_impl=*/this, + /*version_counter=*/version_counter(), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); + } + + private: + // Must be called after any changes to our dim() to sync the state + // to TensorImpl. + void refresh_dim(); + + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const at::Tensor nested_sizes_, nested_strides_; + // The starting positions of the underlying tensors in contiguous buffer + // i.e. the buffer memory offsets to get the underlying tensors + // The reason to keep this metadata is that, without strong enough constraint + // it cannot be derived from `nested_sizes_` + // and `nested_strides_`: + // 1. when buffer has blanks, e.g. [tensor1, blank, tensor2] + // this can happen e.g. after slicing a nested tensor + // 2. when multiple tensors share a same memory + // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2] + // Some strong enough constraints are: + // 1. every underlying tensor is contiguous in memory + // && nesting in ascending order + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const at::Tensor storage_offsets_; + // NOTE: -1 here means the size is missing + // Optional to allow it to be computed lazily from nested. + // TODO: maybe we can remove this metadata since + // we can compute it from `nested_sizes_` + mutable c10::optional> opt_sizes_; + + template + c10::intrusive_ptr shallow_copy_and_detach_core( + VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const; + + /** + * Generates a non-nested key_set from a nested tensor. 
+ * + * For many nested tensor kernel implementations a buffer tensor + * is generated and redispatched to a non-nested kernel this function + * generates the key set used by that buffer tensor + * + * @return Appropriate key set for non-nested tensor + */ + inline c10::DispatchKeySet generate_buffer_key_set() const { + auto buffer_key_set = this->key_set(); + const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset); + // Remove nested tensor specific keys + buffer_key_set = buffer_key_set - + c10::DispatchKeySet{ + c10::DispatchKey::NestedTensor, + c10::DispatchKey::AutogradNestedTensor}; + + // Add dense tensor specific keys + buffer_key_set = + buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense}; + buffer_key_set = Autograd + ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set + : buffer_key_set; + + return buffer_key_set; + } +}; + +inline NestedTensorImpl* get_nested_tensor_impl_or_null( + const at::Tensor& tensor) { + if (tensor.is_nested()) { + return static_cast(tensor.unsafeGetTensorImpl()); + } + return nullptr; +} + +inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) { + TORCH_CHECK( + tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor."); + return static_cast(tensor.unsafeGetTensorImpl()); +} + +inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) { + int64_t ntensors = nt->size(0); + if (ntensors == 0) { + return true; + } + const Tensor &sizemat = nt->get_nested_sizes(), + &stridemat = nt->get_nested_strides(); + int64_t* offsets_ptr = nt->get_storage_offsets().data_ptr(); + int64_t orig_dim = sizemat.size(1); + // nesting scalars + if (orig_dim == 0) { + // each scalar must be contiguous + // if there is blank memory between underlying scalars + for (int64_t i = 0; i < ntensors; i++) { + if (offsets_ptr[i] != i) { + return false; + } + } + } + // nesting tensors + else { + // if any underlying tensor is non-contiguous + const int64_t *sizemat_ptr = sizemat.data_ptr(), + *stridemat_ptr = stridemat.data_ptr(); + for (int64_t i = 0; i < ntensors; i++) { + if (stridemat_ptr[orig_dim - 1] != 1) { + return false; + } + int64_t product = sizemat_ptr[orig_dim - 1]; + for (int64_t j = orig_dim - 2; j >= 0; j--) { + if (stridemat_ptr[j] != product) { + return false; + } + product *= sizemat_ptr[j]; + } + sizemat_ptr += orig_dim; + stridemat_ptr += orig_dim; + } + // if there is blank memory between underlying tensors + if (offsets_ptr[0] != 0) { + return false; + } + sizemat_ptr = sizemat.data_ptr(); + stridemat_ptr = stridemat.data_ptr(); + for (int64_t i = 1; i < ntensors; i++) { + if (offsets_ptr[i] != + offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) { + return false; + } + sizemat_ptr += orig_dim; + stridemat_ptr += orig_dim; + } + } + // everything is fine + return true; +} + +inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) { + return get_nested_tensor_impl(tensor)->get_nested_sizes(); +} + +} // namespace at::native diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..788da64b4e4274bcda9adce01c39902d2aae61ef --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h @@ -0,0 +1,203 @@ +#pragma once + +#ifdef __HIPCC__ +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + 
+namespace at { + +// std::isnan isn't performant to use on integral types; it will +// (uselessly) convert to floating point and then do the test. +// This function is. + +template , int> = 0> +inline C10_HOST_DEVICE bool _isnan(T /*val*/) { + return false; +} + +template , int> = 0> +inline C10_HOST_DEVICE bool _isnan(T val) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return ::isnan(val); +#else + return std::isnan(val); +#endif +} + +template ::value, int> = 0> +inline C10_HOST_DEVICE bool _isnan(T val) { + return std::isnan(val.real()) || std::isnan(val.imag()); +} + +template , int> = 0> +inline C10_HOST_DEVICE bool _isnan(T val) { + return at::_isnan(static_cast(val)); +} + +template < + typename T, + std::enable_if_t, int> = 0> +inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) { + return at::_isnan(static_cast(val)); +} + +inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) { + return at::_isnan(static_cast(val)); +} + +template < + typename T, + std::enable_if_t, int> = 0> +inline C10_HOST_DEVICE bool _isnan(T val) { + return val.isnan(); +} + +template < + typename T, + std::enable_if_t, int> = 0> +inline C10_HOST_DEVICE bool _isnan(T val) { + return val.isnan(); +} + +template < + typename T, + std::enable_if_t, int> = 0> +inline C10_HOST_DEVICE bool _isnan(T val) { + return val.isnan(); +} + +template < + typename T, + std::enable_if_t, int> = 0> +inline C10_HOST_DEVICE bool _isnan(T val) { + return val.isnan(); +} + +// std::isinf isn't performant to use on integral types; it will +// (uselessly) convert to floating point and then do the test. +// This function is. + +template , int> = 0> +inline C10_HOST_DEVICE bool _isinf(T /*val*/) { + return false; +} + +template , int> = 0> +inline C10_HOST_DEVICE bool _isinf(T val) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return ::isinf(val); +#else + return std::isinf(val); +#endif +} + +inline C10_HOST_DEVICE bool _isinf(at::Half val) { + return at::_isinf(static_cast(val)); +} + +inline C10_HOST_DEVICE bool _isinf(at::BFloat16 val) { + return at::_isinf(static_cast(val)); +} + +inline C10_HOST_DEVICE bool _isinf(at::Float8_e5m2 val) { + return val.isinf(); +} + +inline C10_HOST_DEVICE bool _isinf(at::Float8_e4m3fn val) { + return false; +} + +inline C10_HOST_DEVICE bool _isinf(at::Float8_e5m2fnuz val) { + return false; +} + +inline C10_HOST_DEVICE bool _isinf(at::Float8_e4m3fnuz val) { + return false; +} + +template +C10_HOST_DEVICE inline T exp(T x) { + static_assert( + !std::is_same_v, + "this template must be used with float or less precise type"); +#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) + // use __expf fast approximation for peak bandwidth + return __expf(x); +#else + return ::exp(x); +#endif +} + +template <> +C10_HOST_DEVICE inline double exp(double x) { + return ::exp(x); +} + +template +C10_HOST_DEVICE inline T log(T x) { + static_assert( + !std::is_same_v, + "this template must be used with float or less precise type"); +#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) + // use __logf fast approximation for peak bandwidth + return __logf(x); +#else + return ::log(x); +#endif +} + +template <> +C10_HOST_DEVICE inline double log(double x) { + return ::log(x); +} + +template +C10_HOST_DEVICE inline T log1p(T x) { + static_assert( + !std::is_same_v, + "this template must be used with float or less precise type"); +#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) + // use __logf fast approximation for peak bandwidth + // NOTE: There is no __log1pf so unfortunately we lose precision. 
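+  // Concretely: for |x| much smaller than 1, the sum (1.0f + x) rounds away
+  // the low-order bits of x (and is exactly 1.0f once |x| < ~6e-8), so this
+  // fast path drifts toward 0 where an exact log1p would return roughly x.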
+ return __logf(1.0f + x); +#else + return ::log1p(x); +#endif +} + +template <> +C10_HOST_DEVICE inline double log1p(double x) { + return ::log1p(x); +} + +template +C10_HOST_DEVICE inline T tan(T x) { + static_assert( + !std::is_same_v, + "this template must be used with float or less precise type"); +#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) + // use __tanf fast approximation for peak bandwidth + return __tanf(x); +#else + return ::tan(x); +#endif +} + +template <> +C10_HOST_DEVICE inline double tan(double x) { + return ::tan(x); +} + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h new file mode 100644 index 0000000000000000000000000000000000000000..d00195b07e490208db6aa9a015bca79b0cc1c83f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +// For FP16 or BFloat16 inputs, ops should perform internal math in FP32. +template +struct OpMathType { + using type = scalar_t; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType> { + using type = c10::complex; +}; + +template +using opmath_type = typename OpMathType::type; + +namespace { + +inline c10::ScalarType toOpMathType(const c10::ScalarType type) { + switch (type) { +#define DEFINE_CASE(scalar_t, TypeNum) \ + case ScalarType::TypeNum: \ + return CppTypeToScalarType>::value; + + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type); + } +} + +} // namespace + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h new file mode 100644 index 0000000000000000000000000000000000000000..1c910dfb97dce44748c054b457b400b13b1b9fda --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include + +namespace at { + +class TORCH_API PTThreadPool : public c10::ThreadPool { + public: + explicit PTThreadPool(int pool_size, int numa_node_id = -1) + : c10::ThreadPool(pool_size, numa_node_id, []() { + c10::setThreadName("PTThreadPool"); + at::init_num_threads(); + }) {} +}; + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h new file mode 100644 index 0000000000000000000000000000000000000000..042cd92da19345d7523671ca75da7279d13062a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include +#include + +namespace at { + +// Launches intra-op parallel task, returns a future +TORCH_API c10::intrusive_ptr intraop_launch_future( + std::function func); + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h new file mode 100644 index 0000000000000000000000000000000000000000..9193e06ed695233637fe5ee8344777e3e42c799b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include + +#include + +#ifdef _WIN32 +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#endif +#include + +#define INTRA_OP_PARALLEL + +namespace at::internal { + +template +inline void invoke_parallel( + const int64_t begin, + const int64_t end, + const int64_t grain_size, + const F& f) { + // Choose number of tasks based on grain size and number of threads. + int64_t chunk_size = divup((end - begin), get_num_threads()); + // Make sure each task is at least grain_size size. + chunk_size = std::max(grain_size, chunk_size); + + std::atomic_flag err_flag = ATOMIC_FLAG_INIT; + std::exception_ptr eptr; + tbb::parallel_for( + tbb::blocked_range(begin, end, chunk_size), + [&eptr, &err_flag, f](const tbb::blocked_range& r) { + try { + internal::ThreadIdGuard tid_guard( + tbb::this_task_arena::current_thread_index()); + f(r.begin(), r.end()); + } catch (...) { + if (!err_flag.test_and_set()) { + eptr = std::current_exception(); + } + } + }, + tbb::static_partitioner{}); + if (eptr) { + std::rethrow_exception(eptr); + } +} + +} // namespace at::internal diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h new file mode 100644 index 0000000000000000000000000000000000000000..84e744ba10b10af06a234ade767c2a1caa34d9fa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include +#include + +#ifdef _OPENMP +#define INTRA_OP_PARALLEL + +#include +#endif + +#ifdef _OPENMP +namespace at::internal { +template +inline void invoke_parallel( + int64_t begin, + int64_t end, + int64_t grain_size, + const F& f) { + std::atomic_flag err_flag = ATOMIC_FLAG_INIT; + std::exception_ptr eptr; + +#pragma omp parallel + { + // choose number of tasks based on grain size and number of threads + // can't use num_threads clause due to bugs in GOMP's thread pool (See + // #32008) + int64_t num_threads = omp_get_num_threads(); + if (grain_size > 0) { + num_threads = std::min(num_threads, divup((end - begin), grain_size)); + } + + int64_t tid = omp_get_thread_num(); + int64_t chunk_size = divup((end - begin), num_threads); + int64_t begin_tid = begin + tid * chunk_size; + if (begin_tid < end) { + try { + internal::ThreadIdGuard tid_guard(tid); + f(begin_tid, std::min(end, chunk_size + begin_tid)); + } catch (...) 
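+        // Worked example of the chunking in this parallel region (numbers are
+        // illustrative): with end - begin = 1000, grain_size = 100 and 16
+        // OpenMP threads, num_threads = min(16, divup(1000, 100)) = 10 and
+        // chunk_size = divup(1000, 10) = 100, so threads with tid >= 10 see
+        // begin_tid >= end and skip the body entirely.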
{ + if (!err_flag.test_and_set()) { + eptr = std::current_exception(); + } + } + } + } + if (eptr) { + std::rethrow_exception(eptr); + } +} +} // namespace at::internal +#endif // _OPENMP diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..474607d3951bd7d14f2e62635ddc2cb4fb2fa26b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h @@ -0,0 +1,24791 @@ +#pragma once + +// @generated by torchgen/gen.py from RedispatchFunctions.h + +#ifdef TORCH_ASSERT_ONLY_METHOD_OPERATORS +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider using the at::_ops::{name}::redispatch() interface by including \ + the specific operator from +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +namespace redispatch { + + // aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor + inline at::Tensor _cast_Byte(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Byte::redispatch(dispatchKeySet, self, non_blocking); + } + + // aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor + inline at::Tensor _cast_Char(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Char::redispatch(dispatchKeySet, self, non_blocking); + } + + // aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor + inline at::Tensor _cast_Double(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Double::redispatch(dispatchKeySet, self, non_blocking); + } + + // aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor + inline at::Tensor _cast_Float(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Float::redispatch(dispatchKeySet, self, non_blocking); + } + + // aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor + inline at::Tensor _cast_Int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Int::redispatch(dispatchKeySet, self, non_blocking); + } + + // aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor + inline at::Tensor _cast_Long(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Long::redispatch(dispatchKeySet, self, non_blocking); + } + + // aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor + inline at::Tensor _cast_Short(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Short::redispatch(dispatchKeySet, self, non_blocking); + } + + // aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor + inline at::Tensor _cast_Half(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) { + return at::_ops::_cast_Half::redispatch(dispatchKeySet, self, non_blocking); + } + + // aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? 
retain_graph=None, bool create_graph=False) -> () + inline void __dispatch__backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList inputs, const c10::optional & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false) { + return at::_ops::_backward::redispatch(dispatchKeySet, self, inputs, gradient, retain_graph, create_graph); + } + + // aten::set_data(Tensor(a!) self, Tensor new_data) -> () + inline void __dispatch_set_data(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & new_data) { + return at::_ops::set_data::redispatch(dispatchKeySet, self, new_data); + } + + // aten::data(Tensor self) -> Tensor + inline at::Tensor __dispatch_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::data::redispatch(dispatchKeySet, self); + } + + // aten::is_leaf(Tensor self) -> bool + inline bool __dispatch_is_leaf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_leaf::redispatch(dispatchKeySet, self); + } + + // aten::output_nr(Tensor self) -> int + inline int64_t __dispatch_output_nr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::output_nr::redispatch(dispatchKeySet, self); + } + + // aten::_version(Tensor self) -> int + inline int64_t __dispatch__version(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_version::redispatch(dispatchKeySet, self); + } + + // aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!) + inline at::Tensor & __dispatch_requires_grad_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool requires_grad=true) { + return at::_ops::requires_grad_::redispatch(dispatchKeySet, self, requires_grad); + } + + // aten::retain_grad(Tensor(a!) 
self) -> () + inline void __dispatch_retain_grad(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::retain_grad::redispatch(dispatchKeySet, self); + } + + // aten::retains_grad(Tensor self) -> bool + inline bool __dispatch_retains_grad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::retains_grad::redispatch(dispatchKeySet, self); + } + + // aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a) + inline at::Tensor _fw_primal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) { + return at::_ops::_fw_primal::redispatch(dispatchKeySet, self, level); + } + + // aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a) + inline at::Tensor _make_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) { + return at::_ops::_make_dual::redispatch(dispatchKeySet, primal, tangent, level); + } + + // aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent) + inline ::std::tuple _unpack_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dual, int64_t level) { + return at::_ops::_unpack_dual::redispatch(dispatchKeySet, dual, level); + } + + // aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor + inline at::Tensor _new_zeros_with_same_feature_meta(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) { + return at::_ops::_new_zeros_with_same_feature_meta::redispatch(dispatchKeySet, self, other, self_num_batch_dims); + } + + // aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool + inline bool _has_same_storage_numel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::_has_same_storage_numel::redispatch(dispatchKeySet, self, other); + } + + // aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!) + inline at::Tensor & rename_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional names) { + return at::_ops::rename_::redispatch(dispatchKeySet, self, names); + } + + // aten::rename(Tensor(a) self, Dimname[]? 
names) -> Tensor(a) + inline at::Tensor rename(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional names) { + return at::_ops::rename::redispatch(dispatchKeySet, self, names); + } + + // aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a) + inline at::Tensor align_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) { + return at::_ops::align_to::redispatch(dispatchKeySet, self, names); + } + + // aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a) + inline at::Tensor align_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) { + return at::_ops::align_to_ellipsis_idx::redispatch(dispatchKeySet, self, order, ellipsis_idx); + } + + // aten::align_as(Tensor self, Tensor other) -> Tensor + inline at::Tensor align_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::align_as::redispatch(dispatchKeySet, self, other); + } + + // aten::align_tensors(Tensor[] tensors) -> Tensor[] + inline ::std::vector align_tensors(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::align_tensors::redispatch(dispatchKeySet, tensors); + } + + // aten::_assert_async(Tensor self) -> () + inline void _assert_async(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_assert_async::redispatch(dispatchKeySet, self); + } + + // aten::_assert_async.msg(Tensor self, str assert_msg) -> () + inline void _assert_async(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view assert_msg) { + return at::_ops::_assert_async_msg::redispatch(dispatchKeySet, self, assert_msg); + } + + // aten::_assert_scalar(Scalar self, str assert_msg) -> () + inline void _assert_scalar(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, c10::string_view assert_msg) { + return at::_ops::_assert_scalar::redispatch(dispatchKeySet, self, assert_msg); + } + + // aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor + inline at::Tensor _functional_assert_scalar(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token) { + return at::_ops::_functional_assert_scalar::redispatch(dispatchKeySet, self, assert_msg, dep_token); + } + + // aten::_functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor + inline at::Tensor _functional_assert_async(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token) { + return at::_ops::_functional_assert_async_msg::redispatch(dispatchKeySet, self, assert_msg, dep_token); + } + + // aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None) -> () + inline void _assert_tensor_metadata(c10::DispatchKeySet dispatchKeySet, const at::Tensor & a, at::OptionalIntArrayRef size=c10::nullopt, at::OptionalIntArrayRef stride=c10::nullopt, c10::optional dtype=c10::nullopt) { + return at::_ops::_assert_tensor_metadata::redispatch(dispatchKeySet, a, size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*size)) : c10::nullopt, stride.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*stride)) : c10::nullopt, dtype); + } + + // aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? 
dtype=None) -> () + inline void _assert_tensor_metadata_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & a, at::OptionalSymIntArrayRef size=c10::nullopt, at::OptionalSymIntArrayRef stride=c10::nullopt, c10::optional dtype=c10::nullopt) { + return at::_ops::_assert_tensor_metadata::redispatch(dispatchKeySet, a, size, stride, dtype); + } + + // aten::_print(str s) -> () + inline void _print(c10::DispatchKeySet dispatchKeySet, c10::string_view s) { + return at::_ops::_print::redispatch(dispatchKeySet, s); + } + + // aten::sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> () + inline void sym_constrain_range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, c10::optional min=c10::nullopt, c10::optional max=c10::nullopt) { + return at::_ops::sym_constrain_range::redispatch(dispatchKeySet, size, min, max); + } + + // aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> () + inline void sym_constrain_range_for_size(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, c10::optional min=c10::nullopt, c10::optional max=c10::nullopt) { + return at::_ops::sym_constrain_range_for_size::redispatch(dispatchKeySet, size, min, max); + } + + // aten::_functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor + inline at::Tensor _functional_sym_constrain_range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, c10::optional min, c10::optional max, const at::Tensor & dep_token) { + return at::_ops::_functional_sym_constrain_range::redispatch(dispatchKeySet, size, min, max, dep_token); + } + + // aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor + inline at::Tensor _functional_sym_constrain_range_for_size(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, c10::optional min, c10::optional max, const at::Tensor & dep_token) { + return at::_ops::_functional_sym_constrain_range_for_size::redispatch(dispatchKeySet, size, min, max, dep_token); + } + + // aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor _make_dep_token(c10::DispatchKeySet dispatchKeySet, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::_make_dep_token::redispatch(dispatchKeySet, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor _make_dep_token(c10::DispatchKeySet dispatchKeySet, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::_make_dep_token::redispatch(dispatchKeySet, dtype, layout, device, pin_memory, memory_format); + } + + // aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a) + inline at::Tensor refine_names(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) { + return at::_ops::refine_names::redispatch(dispatchKeySet, self, names); + } + + // aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool + inline bool _use_cudnn_ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank) { + return at::_ops::_use_cudnn_ctc_loss::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank); + } + + // aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool + inline bool _use_cudnn_ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank) { + return at::_ops::_use_cudnn_ctc_loss_Tensor::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank); + } + + // aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) + inline ::std::tuple _cudnn_ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + return at::_ops::_cudnn_ctc_loss::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); + } + + // aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) + inline ::std::tuple _cudnn_ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + return at::_ops::_cudnn_ctc_loss_Tensor::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); + } + + // aten::_use_cudnn_rnn_flatten_weight() -> bool + inline bool _use_cudnn_rnn_flatten_weight(c10::DispatchKeySet dispatchKeySet) { + return at::_ops::_use_cudnn_rnn_flatten_weight::redispatch(dispatchKeySet); + } + + // aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor + inline at::Tensor _cudnn_rnn_flatten_weight(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight::redispatch(dispatchKeySet, weight_arr, weight_stride0, 
input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + } + + // aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor + inline at::Tensor _cudnn_rnn_flatten_weight_symint(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + } + + // aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple _cudnn_rnn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state) { + return at::_ops::_cudnn_rnn::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state); + } + + // aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple _cudnn_rnn_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state) { + return at::_ops::_cudnn_rnn::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); + } + + // aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + inline ::std::tuple> _cudnn_rnn_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask) { + return at::_ops::_cudnn_rnn_backward::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask); + } + + // aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + inline ::std::tuple> _cudnn_rnn_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask) { + return at::_ops::_cudnn_rnn_backward::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); + } + + // aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor _cudnn_init_dropout_state(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) { + return at::_ops::_cudnn_init_dropout_state::redispatch(dispatchKeySet, dropout, train, dropout_seed, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor _cudnn_init_dropout_state(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_cudnn_init_dropout_state::redispatch(dispatchKeySet, dropout, train, dropout_seed, dtype, layout, device, pin_memory); + } + + // aten::_debug_has_internal_overlap(Tensor self) -> int + inline int64_t _debug_has_internal_overlap(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_debug_has_internal_overlap::redispatch(dispatchKeySet, self); + } + + // aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor) + inline ::std::tuple _fused_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional generator=c10::nullopt) { + return at::_ops::_fused_dropout::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor + inline at::Tensor _masked_scale(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale) { + return at::_ops::_masked_scale::redispatch(dispatchKeySet, self, mask, scale); + } + + // aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor) + inline ::std::tuple native_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, c10::optional train) { + return at::_ops::native_dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor + inline at::Tensor native_dropout_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale) { + return at::_ops::native_dropout_backward::redispatch(dispatchKeySet, grad_output, mask, scale); + } + + // aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor) + inline ::std::tuple _sobol_engine_draw(c10::DispatchKeySet dispatchKeySet, const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional dtype) { + return at::_ops::_sobol_engine_draw::redispatch(dispatchKeySet, quasi, n, sobolstate, dimension, num_generated, dtype); + } + + // aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!) + inline at::Tensor & _sobol_engine_ff_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) { + return at::_ops::_sobol_engine_ff_::redispatch(dispatchKeySet, self, n, sobolstate, dimension, num_generated); + } + + // aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!) + inline at::Tensor & _sobol_engine_scramble_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & ltm, int64_t dimension) { + return at::_ops::_sobol_engine_scramble_::redispatch(dispatchKeySet, self, ltm, dimension); + } + + // aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!) 
+ inline at::Tensor & _sobol_engine_initialize_state_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dimension) { + return at::_ops::_sobol_engine_initialize_state_::redispatch(dispatchKeySet, self, dimension); + } + + // aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor + inline at::Tensor _reshape_from_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & shape) { + return at::_ops::_reshape_from_tensor::redispatch(dispatchKeySet, self, shape); + } + + // aten::_shape_as_tensor(Tensor self) -> Tensor + inline at::Tensor _shape_as_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_shape_as_tensor::redispatch(dispatchKeySet, self); + } + + // aten::dropout(Tensor input, float p, bool train) -> Tensor + inline at::Tensor dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) { + return at::_ops::dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + inline at::Tensor & dropout_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) { + return at::_ops::dropout_::redispatch(dispatchKeySet, self, p, train); + } + + // aten::feature_dropout(Tensor input, float p, bool train) -> Tensor + inline at::Tensor feature_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) { + return at::_ops::feature_dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + inline at::Tensor & feature_dropout_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) { + return at::_ops::feature_dropout_::redispatch(dispatchKeySet, self, p, train); + } + + // aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor + inline at::Tensor alpha_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) { + return at::_ops::alpha_dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + inline at::Tensor & alpha_dropout_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) { + return at::_ops::alpha_dropout_::redispatch(dispatchKeySet, self, p, train); + } + + // aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor + inline at::Tensor feature_alpha_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) { + return at::_ops::feature_alpha_dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + inline at::Tensor & feature_alpha_dropout_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) { + return at::_ops::feature_alpha_dropout_::redispatch(dispatchKeySet, self, p, train); + } + + // aten::abs(Tensor self) -> Tensor + inline at::Tensor abs(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::abs::redispatch(dispatchKeySet, self); + } + + // aten::abs_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & abs_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::abs_::redispatch(dispatchKeySet, self); + } + + // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
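+  // Usage sketch (hypothetical caller, not a generated entry): every wrapper
+  // in this namespace re-enters the dispatcher below an explicit key set by
+  // forwarding to the matching at::_ops::*::redispatch entry. This is roughly
+  // how an autograd kernel hands abs off to the backend beneath it:
+  //
+  //   at::Tensor abs_autograd(c10::DispatchKeySet ks, const at::Tensor& self) {
+  //     // ... autograd bookkeeping would go here ...
+  //     return at::redispatch::abs(ks & c10::after_autograd_keyset, self);
+  //   }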
+ inline at::Tensor & abs_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::abs_out::redispatch(dispatchKeySet, self, out); + } + + // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & abs_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::abs_out::redispatch(dispatchKeySet, self, out); + } + + // aten::absolute(Tensor self) -> Tensor + inline at::Tensor absolute(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::absolute::redispatch(dispatchKeySet, self); + } + + // aten::absolute_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & absolute_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::absolute_::redispatch(dispatchKeySet, self); + } + + // aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & absolute_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::absolute_out::redispatch(dispatchKeySet, self, out); + } + + // aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & absolute_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::absolute_out::redispatch(dispatchKeySet, self, out); + } + + // aten::angle(Tensor self) -> Tensor + inline at::Tensor angle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::angle::redispatch(dispatchKeySet, self); + } + + // aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & angle_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::angle_out::redispatch(dispatchKeySet, self, out); + } + + // aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & angle_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::angle_out::redispatch(dispatchKeySet, self, out); + } + + // aten::view_as_real(Tensor(a) self) -> Tensor(a) + inline at::Tensor view_as_real(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::view_as_real::redispatch(dispatchKeySet, self); + } + + // aten::view_as_complex(Tensor(a) self) -> Tensor(a) + inline at::Tensor view_as_complex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::view_as_complex::redispatch(dispatchKeySet, self); + } + + // aten::sgn(Tensor self) -> Tensor + inline at::Tensor sgn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sgn::redispatch(dispatchKeySet, self); + } + + // aten::sgn_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sgn_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sgn_::redispatch(dispatchKeySet, self); + } + + // aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sgn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sgn_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sgn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sgn_out::redispatch(dispatchKeySet, self, out); + } + + // aten::chalf(Tensor self, *, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor chalf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::chalf::redispatch(dispatchKeySet, self, memory_format); + } + + // aten::real(Tensor(a) self) -> Tensor(a) + inline at::Tensor real(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::real::redispatch(dispatchKeySet, self); + } + + // aten::imag(Tensor(a) self) -> Tensor(a) + inline at::Tensor imag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::imag::redispatch(dispatchKeySet, self); + } + + // aten::_conj(Tensor(a) self) -> Tensor(a) + inline at::Tensor _conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_conj::redispatch(dispatchKeySet, self); + } + + // aten::conj(Tensor(a) self) -> Tensor(a) + inline at::Tensor __dispatch_conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::conj::redispatch(dispatchKeySet, self); + } + + // aten::_conj_physical(Tensor self) -> Tensor + inline at::Tensor _conj_physical(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_conj_physical::redispatch(dispatchKeySet, self); + } + + // aten::conj_physical(Tensor self) -> Tensor + inline at::Tensor conj_physical(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::conj_physical::redispatch(dispatchKeySet, self); + } + + // aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & conj_physical_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::conj_physical_out::redispatch(dispatchKeySet, self, out); + } + + // aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & conj_physical_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::conj_physical_out::redispatch(dispatchKeySet, self, out); + } + + // aten::conj_physical_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & conj_physical_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::conj_physical_::redispatch(dispatchKeySet, self); + } + + // aten::resolve_conj(Tensor(a) self) -> Tensor(a) + inline at::Tensor resolve_conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::resolve_conj::redispatch(dispatchKeySet, self); + } + + // aten::resolve_neg(Tensor(a) self) -> Tensor(a) + inline at::Tensor resolve_neg(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::resolve_neg::redispatch(dispatchKeySet, self); + } + + // aten::_neg_view(Tensor(a) self) -> Tensor(a) + inline at::Tensor _neg_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_neg_view::redispatch(dispatchKeySet, self); + } + + // aten::acos(Tensor self) -> Tensor + inline at::Tensor acos(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::acos::redispatch(dispatchKeySet, self); + } + + // aten::acos_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & acos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::acos_::redispatch(dispatchKeySet, self); + } + + // aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
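+  // Note on the conjugation/negation entries above: aten::conj and
+  // aten::_neg_view return lazy views that essentially just toggle a bit on
+  // the tensor, aten::resolve_conj / aten::resolve_neg materialize the pending
+  // operation, and aten::conj_physical performs the conjugation eagerly. A
+  // minimal sketch:
+  //
+  //   auto v = at::redispatch::_neg_view(ks, x);   // lazy view of -x
+  //   auto m = at::redispatch::resolve_neg(ks, v); // materializes the negation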
+ inline at::Tensor & acos_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::acos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & acos_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::acos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arccos(Tensor self) -> Tensor + inline at::Tensor arccos(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arccos::redispatch(dispatchKeySet, self); + } + + // aten::arccos_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arccos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arccos_::redispatch(dispatchKeySet, self); + } + + // aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arccos_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arccos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arccos_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arccos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor + inline at::Tensor avg_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true) { + return at::_ops::avg_pool1d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad); + } + + // aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool1d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) + inline ::std::tuple adaptive_max_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool1d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::add_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::add__Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & add_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::add_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
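+  // The `_out` / `_outf` pairs in this header differ only in argument order:
+  // `_out` takes the destination tensor first, while `_outf` keeps the schema
+  // order with `out` last. Both forward to the same at::_ops::*_out entry, so
+  //
+  //   at::redispatch::add_out(ks, out, a, b, /*alpha=*/1);
+  //   at::redispatch::add_outf(ks, a, b, /*alpha=*/1, out);
+  //
+  // perform identical redispatches.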
+ inline at::Tensor & add_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::add_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor _add_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & _add_relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu__Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _add_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _add_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::_add_relu_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor _add_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & _add_relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu__Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::add_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::add__Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor addmv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmv::redispatch(dispatchKeySet, self, mat, vec, beta, alpha); + } + + // aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) 
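+  // addmv computes beta * self + alpha * (mat @ vec), where mat is 2-D and
+  // vec and self are 1-D; with the defaults beta = alpha = 1 this reduces to
+  // self + mat.mv(vec).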
+ inline at::Tensor & addmv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmv_::redispatch(dispatchKeySet, self, mat, vec, beta, alpha); + } + + // aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addmv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmv_out::redispatch(dispatchKeySet, self, mat, vec, beta, alpha, out); + } + + // aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addmv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addmv_out::redispatch(dispatchKeySet, self, mat, vec, beta, alpha, out); + } + + // aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor addr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addr::redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha); + } + + // aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & addr_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addr_::redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha); + } + + // aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addr_out::redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha, out); + } + + // aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & addr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addr_out::redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha, out); + } + + // aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor + inline at::Tensor affine_grid_generator(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator::redispatch(dispatchKeySet, theta, c10::fromIntArrayRefSlow(size), align_corners); + } + + // aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor + inline at::Tensor affine_grid_generator_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator::redispatch(dispatchKeySet, theta, size, align_corners); + } + + // aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor + inline at::Tensor affine_grid_generator_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator_backward::redispatch(dispatchKeySet, grad, c10::fromIntArrayRefSlow(size), align_corners); + } + + // aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor + inline at::Tensor affine_grid_generator_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator_backward::redispatch(dispatchKeySet, grad, size, align_corners); + } + + // aten::_is_all_true(Tensor self) -> Tensor + inline at::Tensor _is_all_true(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_is_all_true::redispatch(dispatchKeySet, self); + } + + // aten::_is_any_true(Tensor self) -> Tensor + inline at::Tensor _is_any_true(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_is_any_true::redispatch(dispatchKeySet, self); + } + + // aten::_test_check_tensor(Tensor self) -> Tensor + inline at::Tensor _test_check_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_test_check_tensor::redispatch(dispatchKeySet, self); + } + + // aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor + inline at::Tensor _test_functorch_fallback(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::_test_functorch_fallback::redispatch(dispatchKeySet, self, other); + } + + // aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + inline at::Tensor all(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::all_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor + inline at::Tensor all(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) { + return at::_ops::all_dims::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
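+  // The paired `*_symint` overloads above (e.g. affine_grid_generator_symint)
+  // take c10::SymInt sizes for symbolic shapes, while the plain IntArrayRef
+  // overloads convert eagerly via c10::fromIntArrayRefSlow before reaching the
+  // same at::_ops entry. With concrete sizes either form works, e.g. (sketch,
+  // with N/H/W as int64_t placeholders):
+  //
+  //   at::redispatch::affine_grid_generator(ks, theta, {N, 2, H, W}, false);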
+ inline at::Tensor & all_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::all_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) { + return at::_ops::all_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) { + return at::_ops::all_dims_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::all_dims_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + inline at::Tensor all(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::all_dimname::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::all_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) { + return at::_ops::all_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool + inline bool allclose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) { + return at::_ops::allclose::redispatch(dispatchKeySet, self, other, rtol, atol, equal_nan); + } + + // aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + inline at::Tensor any(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::any_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor + inline at::Tensor any(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) { + return at::_ops::any_dims::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & any_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::any_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & any_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) { + return at::_ops::any_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & any_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) { + return at::_ops::any_dims_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & any_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::any_dims_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + inline at::Tensor any(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::any_dimname::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & any_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::any_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & any_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) { + return at::_ops::any_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, at::TensorOptions options={}) { + return at::_ops::arange::redispatch(dispatchKeySet, end, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::arange::redispatch(dispatchKeySet, end, dtype, layout, device, pin_memory); + } + + // aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={}) { + return at::_ops::arange_start::redispatch(dispatchKeySet, start, end, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::arange_start::redispatch(dispatchKeySet, start, end, dtype, layout, device, pin_memory); + } + + // aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options={}) { + return at::_ops::arange_start_step::redispatch(dispatchKeySet, start, end, step, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::arange_start_step::redispatch(dispatchKeySet, start, end, step, dtype, layout, device, pin_memory); + } + + // aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arange_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & end) { + return at::_ops::arange_out::redispatch(dispatchKeySet, end, out); + } + + // aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arange_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, at::Tensor & out) { + return at::_ops::arange_out::redispatch(dispatchKeySet, end, out); + } + + // aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arange_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) { + return at::_ops::arange_start_out::redispatch(dispatchKeySet, start, end, step, out); + } + + // aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arange_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) { + return at::_ops::arange_start_out::redispatch(dispatchKeySet, start, end, step, out); + } + + // aten::_dim_arange(Tensor like, int dim) -> Tensor + inline at::Tensor _dim_arange(c10::DispatchKeySet dispatchKeySet, const at::Tensor & like, int64_t dim) { + return at::_ops::_dim_arange::redispatch(dispatchKeySet, like, dim); + } + + // aten::argmax(Tensor self, int? 
dim=None, bool keepdim=False) -> Tensor + inline at::Tensor argmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim=c10::nullopt, bool keepdim=false) { + return at::_ops::argmax::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & argmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional dim=c10::nullopt, bool keepdim=false) { + return at::_ops::argmax_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & argmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim, bool keepdim, at::Tensor & out) { + return at::_ops::argmax_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor + inline at::Tensor argmin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim=c10::nullopt, bool keepdim=false) { + return at::_ops::argmin::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & argmin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional dim=c10::nullopt, bool keepdim=false) { + return at::_ops::argmin_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & argmin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim, bool keepdim, at::Tensor & out) { + return at::_ops::argmin_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::acosh(Tensor self) -> Tensor + inline at::Tensor acosh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::acosh::redispatch(dispatchKeySet, self); + } + + // aten::acosh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & acosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::acosh_::redispatch(dispatchKeySet, self); + } + + // aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & acosh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::acosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & acosh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::acosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arccosh(Tensor self) -> Tensor + inline at::Tensor arccosh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arccosh::redispatch(dispatchKeySet, self); + } + + // aten::arccosh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arccosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arccosh_::redispatch(dispatchKeySet, self); + } + + // aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
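+    // Usage sketch (editorial illustration): `arccosh` is the alias of `acosh`, so
+    // both spellings -- and their `_`, `_out`, `_outf` forms -- are available here
+    // and produce identical results. `ks` is again the forwarded DispatchKeySet:
+    //
+    //   at::Tensor x   = at::rand({4}) + 1.0;          // acosh expects inputs >= 1
+    //   at::Tensor y   = at::redispatch::acosh(ks, x);
+    //   at::Tensor out = at::empty_like(x);
+    //   at::redispatch::arccosh_out(ks, out, x);        // same values as y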
+ inline at::Tensor & arccosh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arccosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arccosh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arccosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::asinh(Tensor self) -> Tensor + inline at::Tensor asinh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::asinh::redispatch(dispatchKeySet, self); + } + + // aten::asinh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & asinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::asinh_::redispatch(dispatchKeySet, self); + } + + // aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & asinh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::asinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & asinh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::asinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arcsinh(Tensor self) -> Tensor + inline at::Tensor arcsinh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arcsinh::redispatch(dispatchKeySet, self); + } + + // aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arcsinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arcsinh_::redispatch(dispatchKeySet, self); + } + + // aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arcsinh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arcsinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arcsinh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arcsinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atanh(Tensor self) -> Tensor + inline at::Tensor atanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::atanh::redispatch(dispatchKeySet, self); + } + + // aten::atanh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & atanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::atanh_::redispatch(dispatchKeySet, self); + } + + // aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atanh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::atanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atanh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::atanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arctanh(Tensor self) -> Tensor + inline at::Tensor arctanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arctanh::redispatch(dispatchKeySet, self); + } + + // aten::arctanh_(Tensor(a!) self) -> Tensor(a!) 
+ inline at::Tensor & arctanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arctanh_::redispatch(dispatchKeySet, self); + } + + // aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arctanh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arctanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arctanh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arctanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) + inline at::Tensor as_strided(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); + } + + // aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) + inline at::Tensor as_strided_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided::redispatch(dispatchKeySet, self, size, stride, storage_offset); + } + + // aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) + inline const at::Tensor & as_strided_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); + } + + // aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) + inline const at::Tensor & as_strided__symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_::redispatch(dispatchKeySet, self, size, stride, storage_offset); + } + + // aten::asin(Tensor self) -> Tensor + inline at::Tensor asin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::asin::redispatch(dispatchKeySet, self); + } + + // aten::asin_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & asin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::asin_::redispatch(dispatchKeySet, self); + } + + // aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & asin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::asin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & asin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::asin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arcsin(Tensor self) -> Tensor + inline at::Tensor arcsin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arcsin::redispatch(dispatchKeySet, self); + } + + // aten::arcsin_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arcsin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arcsin_::redispatch(dispatchKeySet, self); + } + + // aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arcsin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arcsin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arcsin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arcsin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atan(Tensor self) -> Tensor + inline at::Tensor atan(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::atan::redispatch(dispatchKeySet, self); + } + + // aten::atan_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & atan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::atan_::redispatch(dispatchKeySet, self); + } + + // aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atan_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::atan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atan_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::atan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arctan(Tensor self) -> Tensor + inline at::Tensor arctan(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arctan::redispatch(dispatchKeySet, self); + } + + // aten::arctan_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arctan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arctan_::redispatch(dispatchKeySet, self); + } + + // aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arctan_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arctan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
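+    // Usage sketch (editorial illustration): the trailing underscore marks the
+    // in-place variant, which mutates `self` and returns it, while the `arc*`
+    // spellings are aliases of the short forms. With the forwarded key set `ks`:
+    //
+    //   at::Tensor t = at::rand({3}) * 2.0 - 1.0;
+    //   at::Tensor u = at::redispatch::arctan(ks, t);   // new tensor, alias of atan
+    //   at::redispatch::atan_(ks, t);                    // in-place, returns t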
+ inline at::Tensor & arctan_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arctan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atleast_1d(Tensor self) -> Tensor + inline at::Tensor atleast_1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::atleast_1d::redispatch(dispatchKeySet, self); + } + + // aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] + inline ::std::vector atleast_1d(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::atleast_1d_Sequence::redispatch(dispatchKeySet, tensors); + } + + // aten::atleast_2d(Tensor self) -> Tensor + inline at::Tensor atleast_2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::atleast_2d::redispatch(dispatchKeySet, self); + } + + // aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] + inline ::std::vector atleast_2d(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::atleast_2d_Sequence::redispatch(dispatchKeySet, tensors); + } + + // aten::atleast_3d(Tensor self) -> Tensor + inline at::Tensor atleast_3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::atleast_3d::redispatch(dispatchKeySet, self); + } + + // aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] + inline ::std::vector atleast_3d(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::atleast_3d_Sequence::redispatch(dispatchKeySet, tensors); + } + + // aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor baddbmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::baddbmm::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha); + } + + // aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & baddbmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::baddbmm_::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha); + } + + // aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & baddbmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::baddbmm_out::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out); + } + + // aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & baddbmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::baddbmm_out::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out); + } + + // aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor bartlett_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) { + return at::_ops::bartlett_window::redispatch(dispatchKeySet, window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor bartlett_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::bartlett_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory); + } + + // aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor bartlett_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::bartlett_window_periodic::redispatch(dispatchKeySet, window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor bartlett_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::bartlett_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory); + } + + // aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor + inline at::Tensor batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled) { + return at::_ops::batch_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); + } + + // aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor + inline at::Tensor quantized_batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) { + return at::_ops::quantized_batch_norm::redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point); + } + + // aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int) + inline ::std::tuple _batch_norm_impl_index(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled) { + return at::_ops::_batch_norm_impl_index::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); + } + + // aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _batch_norm_impl_index_backward(c10::DispatchKeySet dispatchKeySet, int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var_transform, bool train, double eps, ::std::array output_mask, const at::Tensor & reservedSpace) { + return at::_ops::_batch_norm_impl_index_backward::redispatch(dispatchKeySet, impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace); + } + + // aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor + inline at::Tensor bernoulli(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::bernoulli::redispatch(dispatchKeySet, self, generator); + } + + // aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::bernoulli_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional generator, at::Tensor & out) { + return at::_ops::bernoulli_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & bernoulli_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & p, c10::optional generator=c10::nullopt) { + return at::_ops::bernoulli__Tensor::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & bernoulli_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p=0.5, c10::optional generator=c10::nullopt) { + return at::_ops::bernoulli__float::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor + inline at::Tensor bernoulli(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional generator=c10::nullopt) { + return at::_ops::bernoulli_p::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? 
bias=None) -> Tensor + inline at::Tensor bilinear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional & bias={}) { + return at::_ops::bilinear::redispatch(dispatchKeySet, input1, input2, weight, bias); + } + + // aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor + inline at::Tensor binary_cross_entropy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean) { + return at::_ops::binary_cross_entropy::redispatch(dispatchKeySet, self, target, weight, reduction); + } + + // aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binary_cross_entropy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean) { + return at::_ops::binary_cross_entropy_out::redispatch(dispatchKeySet, self, target, weight, reduction, out); + } + + // aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binary_cross_entropy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, at::Tensor & out) { + return at::_ops::binary_cross_entropy_out::redispatch(dispatchKeySet, self, target, weight, reduction, out); + } + + // aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor + inline at::Tensor binary_cross_entropy_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean) { + return at::_ops::binary_cross_entropy_backward::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction); + } + + // aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & binary_cross_entropy_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean) { + return at::_ops::binary_cross_entropy_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, grad_input); + } + + // aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & binary_cross_entropy_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, at::Tensor & grad_input) { + return at::_ops::binary_cross_entropy_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, grad_input); + } + + // aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? 
pos_weight=None, int reduction=Mean) -> Tensor + inline at::Tensor binary_cross_entropy_with_logits(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, const c10::optional & pos_weight={}, int64_t reduction=at::Reduction::Mean) { + return at::_ops::binary_cross_entropy_with_logits::redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction); + } + + // aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor + inline at::Tensor bincount(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & weights={}, int64_t minlength=0) { + return at::_ops::bincount::redispatch(dispatchKeySet, self, weights, minlength); + } + + // aten::bitwise_not(Tensor self) -> Tensor + inline at::Tensor bitwise_not(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::bitwise_not::redispatch(dispatchKeySet, self); + } + + // aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & bitwise_not_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::bitwise_not_::redispatch(dispatchKeySet, self); + } + + // aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_not_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::bitwise_not_out::redispatch(dispatchKeySet, self, out); + } + + // aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_not_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::bitwise_not_out::redispatch(dispatchKeySet, self, out); + } + + // aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copysign_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::copysign_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copysign_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::copysign_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor copysign(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::copysign_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & copysign_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::copysign__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor copysign(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::copysign_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & copysign_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::copysign__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
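+    // Usage sketch (editorial illustration): overload resolution picks the Tensor
+    // or Scalar variant of copysign from the type of `other`; a plain double binds
+    // to at::Scalar, so no explicit wrapping is needed. With the forwarded key set `ks`:
+    //
+    //   at::Tensor mag = at::rand({3});
+    //   at::Tensor a = at::redispatch::copysign(ks, mag, -at::ones({3}));   // copysign.Tensor
+    //   at::Tensor b = at::redispatch::copysign(ks, mag, -1.0);             // copysign.Scalar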
+ inline at::Tensor & copysign_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::copysign_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copysign_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::copysign_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_lazy_clone(Tensor self) -> Tensor + inline at::Tensor _lazy_clone(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_lazy_clone::redispatch(dispatchKeySet, self); + } + + // aten::logical_not(Tensor self) -> Tensor + inline at::Tensor logical_not(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::logical_not::redispatch(dispatchKeySet, self); + } + + // aten::logical_not_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & logical_not_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::logical_not_::redispatch(dispatchKeySet, self); + } + + // aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_not_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::logical_not_out::redispatch(dispatchKeySet, self, out); + } + + // aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_not_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::logical_not_out::redispatch(dispatchKeySet, self, out); + } + + // aten::logical_xor(Tensor self, Tensor other) -> Tensor + inline at::Tensor logical_xor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_xor::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & logical_xor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_xor_::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_xor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_xor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_xor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logical_xor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logical_and(Tensor self, Tensor other) -> Tensor + inline at::Tensor logical_and(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_and::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & logical_and_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_and_::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
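+    // Usage sketch (editorial illustration): the logical_* family compares
+    // elementwise, treats nonzero values as true, and produces a boolean result.
+    // With the forwarded key set `ks`:
+    //
+    //   at::Tensor a = at::rand({4}) > 0.5;
+    //   at::Tensor b = at::rand({4}) > 0.5;
+    //   at::Tensor both   = at::redispatch::logical_and(ks, a, b);
+    //   at::Tensor either = at::redispatch::logical_or(ks, a, b);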
+ inline at::Tensor & logical_and_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_and_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_and_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logical_and_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logical_or(Tensor self, Tensor other) -> Tensor + inline at::Tensor logical_or(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_or::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & logical_or_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_or_::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_or_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_or_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_or_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logical_or_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor blackman_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) { + return at::_ops::blackman_window::redispatch(dispatchKeySet, window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor blackman_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::blackman_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory); + } + + // aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor blackman_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::blackman_window_periodic::redispatch(dispatchKeySet, window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor blackman_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::blackman_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory); + } + + // aten::bmm(Tensor self, Tensor mat2) -> Tensor + inline at::Tensor bmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) { + return at::_ops::bmm::redispatch(dispatchKeySet, self, mat2); + } + + // aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) { + return at::_ops::bmm_out::redispatch(dispatchKeySet, self, mat2, out); + } + + // aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { + return at::_ops::bmm_out::redispatch(dispatchKeySet, self, mat2, out); + } + + // aten::broadcast_tensors(Tensor[] tensors) -> Tensor[] + inline ::std::vector broadcast_tensors(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::broadcast_tensors::redispatch(dispatchKeySet, tensors); + } + + // aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) + inline at::Tensor broadcast_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::broadcast_to::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size)); + } + + // aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) + inline at::Tensor broadcast_to_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::broadcast_to::redispatch(dispatchKeySet, self, size); + } + + // aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a) + inline at::Tensor _sparse_broadcast_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_sparse_broadcast_to::redispatch(dispatchKeySet, self, size); + } + + // aten::cat(Tensor[] tensors, int dim=0) -> Tensor + inline at::Tensor cat(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim=0) { + return at::_ops::cat::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::ITensorListRef & tensors, int64_t dim=0) { + return at::_ops::cat_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cat_outf(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) { + return at::_ops::cat_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor + inline at::Tensor cat(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) { + return at::_ops::cat_names::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & cat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, at::Dimname dim) { + return at::_ops::cat_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cat_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::cat_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concat(Tensor[] tensors, int dim=0) -> Tensor + inline at::Tensor concat(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concat::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concat_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concat_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::concat_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor + inline at::Tensor concat(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concat_names::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concat_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concat_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::concat_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor + inline at::Tensor concatenate(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concatenate::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concatenate_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concatenate_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concatenate_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::concatenate_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor + inline at::Tensor concatenate(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concatenate_names::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
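+    // Usage sketch (editorial illustration): `concat` and `concatenate` are aliases
+    // of `cat`; note that `cat` takes an at::ITensorListRef while the aliases take
+    // at::TensorList, and a std::vector<at::Tensor> converts to either. With the
+    // forwarded key set `ks`:
+    //
+    //   std::vector<at::Tensor> parts = {at::rand({2, 3}), at::rand({4, 3})};
+    //   at::Tensor joined = at::redispatch::cat(ks, parts, /*dim=*/0);          // shape {6, 3}
+    //   at::Tensor same   = at::redispatch::concatenate(ks, parts, /*dim=*/0);  // alias of cat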
+ inline at::Tensor & concatenate_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concatenate_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concatenate_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::concatenate_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::block_diag(Tensor[] tensors) -> Tensor + inline at::Tensor block_diag(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::block_diag::redispatch(dispatchKeySet, tensors); + } + + // aten::ceil(Tensor self) -> Tensor + inline at::Tensor ceil(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::ceil::redispatch(dispatchKeySet, self); + } + + // aten::ceil_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & ceil_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::ceil_::redispatch(dispatchKeySet, self); + } + + // aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ceil_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::ceil_out::redispatch(dispatchKeySet, self, out); + } + + // aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ceil_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::ceil_out::redispatch(dispatchKeySet, self, out); + } + + // aten::chain_matmul(Tensor[] matrices) -> Tensor + inline at::Tensor chain_matmul(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices) { + return at::_ops::chain_matmul::redispatch(dispatchKeySet, matrices); + } + + // aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & chain_matmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList matrices) { + return at::_ops::chain_matmul_out::redispatch(dispatchKeySet, matrices, out); + } + + // aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & chain_matmul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices, at::Tensor & out) { + return at::_ops::chain_matmul_out::redispatch(dispatchKeySet, matrices, out); + } + + // aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] + inline ::std::vector unsafe_chunk(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim=0) { + return at::_ops::unsafe_chunk::redispatch(dispatchKeySet, self, chunks, dim); + } + + // aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] + inline ::std::vector chunk(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim=0) { + return at::_ops::chunk::redispatch(dispatchKeySet, self, chunks, dim); + } + + // aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] + inline ::std::vector tensor_split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections, int64_t dim=0) { + return at::_ops::tensor_split_sections::redispatch(dispatchKeySet, self, sections, dim); + } + + // aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] + inline ::std::vector tensor_split_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt sections, int64_t dim=0) { + return at::_ops::tensor_split_sections::redispatch(dispatchKeySet, self, sections, dim); + } + + // aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] + inline ::std::vector tensor_split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices, int64_t dim=0) { + return at::_ops::tensor_split_indices::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(indices), dim); + } + + // aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] + inline ::std::vector tensor_split_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim=0) { + return at::_ops::tensor_split_indices::redispatch(dispatchKeySet, self, indices, dim); + } + + // aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] + inline ::std::vector tensor_split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim=0) { + return at::_ops::tensor_split_tensor_indices_or_sections::redispatch(dispatchKeySet, self, tensor_indices_or_sections, dim); + } + + // aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor + inline at::Tensor clamp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clamp::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor + inline at::Tensor clamp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clamp_Tensor::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) + inline at::Tensor & clamp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clamp_::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clamp_.Tensor(Tensor(a!) self, Tensor? 
min=None, Tensor? max=None) -> Tensor(a!) + inline at::Tensor & clamp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clamp__Tensor::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clamp_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max, at::Tensor & out) { + return at::_ops::clamp_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clamp_Tensor_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max, at::Tensor & out) { + return at::_ops::clamp_Tensor_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clamp_max(Tensor self, Scalar max) -> Tensor + inline at::Tensor clamp_max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max) { + return at::_ops::clamp_max::redispatch(dispatchKeySet, self, max); + } + + // aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor + inline at::Tensor clamp_max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max) { + return at::_ops::clamp_max_Tensor::redispatch(dispatchKeySet, self, max); + } + + // aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) + inline at::Tensor & clamp_max_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & max) { + return at::_ops::clamp_max_::redispatch(dispatchKeySet, self, max); + } + + // aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) + inline at::Tensor & clamp_max_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & max) { + return at::_ops::clamp_max__Tensor::redispatch(dispatchKeySet, self, max); + } + + // aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & max) { + return at::_ops::clamp_max_out::redispatch(dispatchKeySet, self, max, out); + } + + // aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max, at::Tensor & out) { + return at::_ops::clamp_max_out::redispatch(dispatchKeySet, self, max, out); + } + + // aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) 
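+    // Usage sketch (editorial illustration): like `clamp`, `clamp_max` has Scalar
+    // and Tensor overloads, and the Tensor form broadcasts `max` against `self`.
+    // With the forwarded key set `ks`:
+    //
+    //   at::Tensor x = at::randn({2, 3});
+    //   at::Tensor capped     = at::redispatch::clamp_max(ks, x, 1.0);               // Scalar max
+    //   at::Tensor row_capped = at::redispatch::clamp_max(ks, x, at::rand({2, 1}));  // broadcast Tensor max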
+ inline at::Tensor & clamp_max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & max) { + return at::_ops::clamp_max_Tensor_out::redispatch(dispatchKeySet, self, max, out); + } + + // aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max, at::Tensor & out) { + return at::_ops::clamp_max_Tensor_out::redispatch(dispatchKeySet, self, max, out); + } + + // aten::clamp_min(Tensor self, Scalar min) -> Tensor + inline at::Tensor clamp_min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min) { + return at::_ops::clamp_min::redispatch(dispatchKeySet, self, min); + } + + // aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor + inline at::Tensor clamp_min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min) { + return at::_ops::clamp_min_Tensor::redispatch(dispatchKeySet, self, min); + } + + // aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) + inline at::Tensor & clamp_min_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min) { + return at::_ops::clamp_min_::redispatch(dispatchKeySet, self, min); + } + + // aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) + inline at::Tensor & clamp_min_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & min) { + return at::_ops::clamp_min__Tensor::redispatch(dispatchKeySet, self, min); + } + + // aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & min) { + return at::_ops::clamp_min_out::redispatch(dispatchKeySet, self, min, out); + } + + // aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min, at::Tensor & out) { + return at::_ops::clamp_min_out::redispatch(dispatchKeySet, self, min, out); + } + + // aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & min) { + return at::_ops::clamp_min_Tensor_out::redispatch(dispatchKeySet, self, min, out); + } + + // aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min, at::Tensor & out) { + return at::_ops::clamp_min_Tensor_out::redispatch(dispatchKeySet, self, min, out); + } + + // aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor + inline at::Tensor clip(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clip::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor + inline at::Tensor clip(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clip_Tensor::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) 
+ inline at::Tensor & clip_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clip_::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) + inline at::Tensor & clip_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clip__Tensor::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clip_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clip_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clip_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max, at::Tensor & out) { + return at::_ops::clip_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clip_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clip_Tensor_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clip_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max, at::Tensor & out) { + return at::_ops::clip_Tensor_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::cudnn_is_acceptable(Tensor self) -> bool + inline bool cudnn_is_acceptable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::cudnn_is_acceptable::redispatch(dispatchKeySet, self); + } + + // aten::complex(Tensor real, Tensor imag) -> Tensor + inline at::Tensor complex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag) { + return at::_ops::complex::redispatch(dispatchKeySet, real, imag); + } + + // aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & complex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & real, const at::Tensor & imag) { + return at::_ops::complex_out::redispatch(dispatchKeySet, real, imag, out); + } + + // aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & complex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) { + return at::_ops::complex_out::redispatch(dispatchKeySet, real, imag, out); + } + + // aten::polar(Tensor abs, Tensor angle) -> Tensor + inline at::Tensor polar(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle) { + return at::_ops::polar::redispatch(dispatchKeySet, abs, angle); + } + + // aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & polar_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & abs, const at::Tensor & angle) { + return at::_ops::polar_out::redispatch(dispatchKeySet, abs, angle, out); + } + + // aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & polar_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) { + return at::_ops::polar_out::redispatch(dispatchKeySet, abs, angle, out); + } + + // aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor + inline at::Tensor constant_pad_nd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(pad), value); + } + + // aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor + inline at::Tensor constant_pad_nd_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd::redispatch(dispatchKeySet, self, pad, value); + } + + // aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) + inline at::Tensor __dispatch_contiguous(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::MemoryFormat memory_format=MemoryFormat::Contiguous) { + return at::_ops::contiguous::redispatch(dispatchKeySet, self, memory_format); + } + + // aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor + inline at::Tensor convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups); + } + + // aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor + inline at::Tensor convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + return at::_ops::convolution::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); + } + + // aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? 
bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple convolution_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask) { + return at::_ops::convolution_backward::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask); + } + + // aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple convolution_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::convolution_backward::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask); + } + + // aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor + inline at::Tensor convolution_overrideable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_overrideable::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups); + } + + // aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor + inline at::Tensor convolution_overrideable_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + return at::_ops::convolution_overrideable::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); + } + + // aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + inline ::std::tuple convolution_backward_overrideable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask) { + return at::_ops::convolution_backward_overrideable::redispatch(dispatchKeySet, grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask); + } + + // aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + inline ::std::tuple convolution_backward_overrideable_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::convolution_backward_overrideable::redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask); + } + + // aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor + inline at::Tensor _convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { + return at::_ops::_convolution::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32); + } + + // aten::_convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor + inline at::Tensor _convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { + return at::_ops::_convolution::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32); + } + + // aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor + inline at::Tensor _convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) { + return at::_ops::_convolution_deprecated::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); + } + + // aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor + inline at::Tensor _convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) { + return at::_ops::_convolution_deprecated::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); + } + + // aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor _convolution_mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::_convolution_mode::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::_convolution_mode(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor _convolution_mode_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::_convolution_mode::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _convolution_double_backward(c10::DispatchKeySet dispatchKeySet, const c10::optional & ggI, const c10::optional & ggW, const c10::optional & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask) { + return at::_ops::_convolution_double_backward::redispatch(dispatchKeySet, ggI, ggW, ggb, gO, weight, self, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask); + } + + // aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _convolution_double_backward_symint(c10::DispatchKeySet dispatchKeySet, const c10::optional & ggI, const c10::optional & ggW, const c10::optional & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::_convolution_double_backward::redispatch(dispatchKeySet, ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask); + } + + // aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv1d::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) { + return at::_ops::conv1d::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv2d::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) { + return at::_ops::conv2d::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv3d::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) { + return at::_ops::conv3d::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv1d_padding::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::conv1d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) { + return at::_ops::conv1d_padding::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv2d_padding::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) { + return at::_ops::conv2d_padding::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv3d_padding::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::conv3d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor conv3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) { + return at::_ops::conv3d_padding::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor + inline at::Tensor conv_tbc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) { + return at::_ops::conv_tbc::redispatch(dispatchKeySet, self, weight, bias, pad); + } + + // aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) + inline ::std::tuple conv_tbc_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) { + return at::_ops::conv_tbc_backward::redispatch(dispatchKeySet, self, input, weight, bias, pad); + } + + // aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor + inline at::Tensor conv_transpose1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) { + return at::_ops::conv_transpose1d::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), groups, c10::fromIntArrayRefSlow(dilation)); + } + + // aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor + inline at::Tensor conv_transpose1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::conv_transpose1d::redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation); + } + + // aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor + inline at::Tensor conv_transpose2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) { + return at::_ops::conv_transpose2d_input::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), groups, c10::fromIntArrayRefSlow(dilation)); + } + + // aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor + inline at::Tensor conv_transpose2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::conv_transpose2d_input::redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation); + } + + // aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor + inline at::Tensor conv_transpose3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) { + return at::_ops::conv_transpose3d_input::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), groups, c10::fromIntArrayRefSlow(dilation)); + } + + // aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor + inline at::Tensor conv_transpose3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::conv_transpose3d_input::redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation); + } + + // aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor + inline at::Tensor copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) + inline at::Tensor & copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor + inline at::Tensor _copy_from(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false) { + return at::_ops::_copy_from::redispatch(dispatchKeySet, self, dst, non_blocking); + } + + // aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor + inline at::Tensor _copy_from_and_resize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst) { + return at::_ops::_copy_from_and_resize::redispatch(dispatchKeySet, self, dst); + } + + // aten::cos(Tensor self) -> Tensor + inline at::Tensor cos(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::cos::redispatch(dispatchKeySet, self); + } + + // aten::cos_(Tensor(a!) self) -> Tensor(a!) 
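+ // Illustrative note (not part of the generated header): wrappers whose name
+ // ends in a trailing underscore (copy_ above, cos_ below) are the in-place
+ // variants; they mutate `self` and return the same reference. Assuming a
+ // DispatchKeySet `ks` and a tensor `x` in scope, with at::redispatch as
+ // elsewhere in this header:
+ //
+ //   at::redispatch::cos_(ks, x);  // x now holds cos of its previous values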
+ inline at::Tensor & cos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::cos_::redispatch(dispatchKeySet, self); + } + + // aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cos_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::cos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cos_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::cos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::cosh(Tensor self) -> Tensor + inline at::Tensor cosh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::cosh::redispatch(dispatchKeySet, self); + } + + // aten::cosh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & cosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::cosh_::redispatch(dispatchKeySet, self); + } + + // aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cosh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::cosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cosh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::cosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor + inline at::Tensor cosine_embedding_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean) { + return at::_ops::cosine_embedding_loss::redispatch(dispatchKeySet, input1, input2, target, margin, reduction); + } + + // aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor + inline at::Tensor count_nonzero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::count_nonzero_dim_IntList::redispatch(dispatchKeySet, self, dim); + } + + // aten::count_nonzero(Tensor self, int? dim=None) -> Tensor + inline at::Tensor count_nonzero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim=c10::nullopt) { + return at::_ops::count_nonzero::redispatch(dispatchKeySet, self, dim); + } + + // aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? 
aweights=None) -> Tensor + inline at::Tensor cov(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t correction=1, const c10::optional & fweights={}, const c10::optional & aweights={}) { + return at::_ops::cov::redispatch(dispatchKeySet, self, correction, fweights, aweights); + } + + // aten::corrcoef(Tensor self) -> Tensor + inline at::Tensor corrcoef(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::corrcoef::redispatch(dispatchKeySet, self); + } + + // aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid + inline at::Tensor cudnn_affine_grid_generator(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) { + return at::_ops::cudnn_affine_grid_generator::redispatch(dispatchKeySet, theta, N, C, H, W); + } + + // aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta + inline at::Tensor cudnn_affine_grid_generator_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) { + return at::_ops::cudnn_affine_grid_generator_backward::redispatch(dispatchKeySet, grad, N, C, H, W); + } + + // aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple cudnn_batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon) { + return at::_ops::cudnn_batch_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); + } + + // aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor) + inline ::std::tuple cudnn_batch_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const at::Tensor & reserveSpace) { + return at::_ops::cudnn_batch_norm_backward::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace); + } + + // aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + inline at::Tensor cudnn_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32); + } + + // aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + inline at::Tensor cudnn_convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution::redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); + } + + // aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32, out); + } + + // aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
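+ // Illustrative sketch (not part of the generated header): each size-taking
+ // entry comes as a plain overload on at::IntArrayRef, which converts through
+ // c10::fromIntArrayRefSlow before redispatching, and a `_symint` overload
+ // that forwards c10::SymIntArrayRef unchanged for symbolic shapes. With
+ // concrete shapes the plain overload is enough; `ks`, `input`, and `weight`
+ // are assumed to be in scope, with at::redispatch as elsewhere in this
+ // header:
+ //
+ //   at::Tensor y = at::redispatch::cudnn_convolution(
+ //       ks, input, weight, /*padding=*/{0, 0}, /*stride=*/{1, 1},
+ //       /*dilation=*/{1, 1}, /*groups=*/1, /*benchmark=*/false,
+ //       /*deterministic=*/false, /*allow_tf32=*/true);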
+ inline at::Tensor & cudnn_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) { + return at::_ops::cudnn_convolution_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32, out); + } + + // aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution_out::redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out); + } + + // aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) { + return at::_ops::cudnn_convolution_out::redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out); + } + + // aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + inline at::Tensor cudnn_convolution_transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution_transpose::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32); + } + + // aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + inline at::Tensor cudnn_convolution_transpose_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution_transpose::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); + } + + // 
aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor _mps_convolution_transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::_mps_convolution_transpose::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor _mps_convolution_transpose_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::_mps_convolution_transpose::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups); + } + + // aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) + inline ::std::tuple mps_convolution_transpose_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::redispatch(dispatchKeySet, self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); + } + + // aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) + inline ::std::tuple mps_convolution_transpose_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); + } + + // aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor cudnn_convolution_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::cudnn_convolution_relu::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor cudnn_convolution_relu_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::cudnn_convolution_relu::redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups); + } + + // aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor cudnn_convolution_add_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::cudnn_convolution_add_relu::redispatch(dispatchKeySet, self, weight, z, alpha, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor cudnn_convolution_add_relu_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::cudnn_convolution_add_relu::redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups); + } + + // aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output + inline at::Tensor cudnn_grid_sampler(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid) { + return at::_ops::cudnn_grid_sampler::redispatch(dispatchKeySet, self, grid); + } + + // aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) + inline ::std::tuple cudnn_grid_sampler_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) { + return at::_ops::cudnn_grid_sampler_backward::redispatch(dispatchKeySet, self, grid, grad_output); + } + + // aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) + inline ::std::tuple cummax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::cummax::redispatch(dispatchKeySet, self, dim); + } + + // aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
// indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> cummax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) { + return at::_ops::cummax_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> cummax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummax_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> cummax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummax_dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> cummax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummax_dimname_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> cummax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummax_dimname_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () + inline void _cummax_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) { + return at::_ops::_cummax_helper::redispatch(dispatchKeySet, self, values, indices, dim); + } + + // aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> cummin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::cummin::redispatch(dispatchKeySet, self, dim); + } + + // aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) { + return at::_ops::cummin_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummin_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> cummin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummin_dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!)
// indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummin_dimname_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummin_dimname_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () + inline void _cummin_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) { + return at::_ops::_cummin_helper::redispatch(dispatchKeySet, self, values, indices, dim); + } + + // aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor + inline at::Tensor cummaxmin_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) { + return at::_ops::cummaxmin_backward::redispatch(dispatchKeySet, grad, input, indices, dim); + } + + // aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor cumprod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumprod::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) + inline at::Tensor & cumprod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumprod_::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cumprod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumprod_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cumprod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::cumprod_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor cumprod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumprod_dimname::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) + inline at::Tensor & cumprod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumprod__dimname::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cumprod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumprod_dimname_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cumprod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::cumprod_dimname_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor + inline at::Tensor cumprod_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) { + return at::_ops::cumprod_backward::redispatch(dispatchKeySet, grad, input, dim, output); + } + + // aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor cumsum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumsum::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) + inline at::Tensor & cumsum_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumsum_::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cumsum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumsum_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cumsum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::cumsum_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor cumsum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumsum_dimname::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) + inline at::Tensor & cumsum_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumsum__dimname::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cumsum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::cumsum_dimname_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
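+ // Illustrative sketch (not part of the generated header): the optional
+ // `dtype` on cumprod/cumsum fixes the accumulation dtype up front, which can
+ // help with low-precision inputs; the default c10::nullopt keeps the input
+ // dtype. Assuming `ks` and a tensor `x` in scope, with at::redispatch as
+ // elsewhere in this header:
+ //
+ //   at::Tensor s = at::redispatch::cumsum(ks, x, /*dim=*/0, at::kFloat);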
+ inline at::Tensor & cumsum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype, at::Tensor & out) { + return at::_ops::cumsum_dimname_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + inline at::Tensor cumulative_trapezoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) { + return at::_ops::cumulative_trapezoid_x::redispatch(dispatchKeySet, y, x, dim); + } + + // aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor + inline at::Tensor cumulative_trapezoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) { + return at::_ops::cumulative_trapezoid_dx::redispatch(dispatchKeySet, y, dx, dim); + } + + // aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + inline at::Tensor ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false) { + return at::_ops::ctc_loss_IntList::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); + } + + // aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + inline at::Tensor ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false) { + return at::_ops::ctc_loss_Tensor::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); + } + + // aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) + inline ::std::tuple _ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false) { + return at::_ops::_ctc_loss::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); + } + + // aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) + inline ::std::tuple _ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false) { + return at::_ops::_ctc_loss_Tensor::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); + } + + // aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor + inline at::Tensor _ctc_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, 
at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) { + return at::_ops::_ctc_loss_backward::redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); + } + + // aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor + inline at::Tensor _ctc_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) { + return at::_ops::_ctc_loss_backward_Tensor::redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); + } + + // aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor + inline at::Tensor diag_embed(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) { + return at::_ops::diag_embed::redispatch(dispatchKeySet, self, offset, dim1, dim2); + } + + // aten::diagflat(Tensor self, int offset=0) -> Tensor + inline at::Tensor diagflat(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset=0) { + return at::_ops::diagflat::redispatch(dispatchKeySet, self, offset); + } + + // aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) + inline at::Tensor diagonal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) { + return at::_ops::diagonal::redispatch(dispatchKeySet, self, offset, dim1, dim2); + } + + // aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a) + inline at::Tensor linalg_diagonal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) { + return at::_ops::linalg_diagonal::redispatch(dispatchKeySet, A, offset, dim1, dim2); + } + + // aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) + inline at::Tensor diagonal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0) { + return at::_ops::diagonal_Dimname::redispatch(dispatchKeySet, self, outdim, dim1, dim2, offset); + } + + // aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor + inline at::Tensor diagonal_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { + return at::_ops::diagonal_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2); + } + + // aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor + inline at::Tensor diagonal_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { + return at::_ops::diagonal_backward::redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2); + } + + // 
aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) + inline at::Tensor & fill_diagonal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & fill_value, bool wrap=false) { + return at::_ops::fill_diagonal_::redispatch(dispatchKeySet, self, fill_value, wrap); + } + + // aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor + inline at::Tensor diff(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional & prepend={}, const c10::optional & append={}) { + return at::_ops::diff::redispatch(dispatchKeySet, self, n, dim, prepend, append); + } + + // aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diff_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional & prepend={}, const c10::optional & append={}) { + return at::_ops::diff_out::redispatch(dispatchKeySet, self, n, dim, prepend, append, out); + } + + // aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diff_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append, at::Tensor & out) { + return at::_ops::diff_out::redispatch(dispatchKeySet, self, n, dim, prepend, append, out); + } + + // aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & spacing=c10::nullopt, c10::optional dim=c10::nullopt, int64_t edge_order=1) { + return at::_ops::gradient_scalarint::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_scalararray::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_array::redispatch(dispatchKeySet, self, dim, edge_order); + } + + // aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef spacing, c10::optional dim=c10::nullopt, int64_t edge_order=1) { + return at::_ops::gradient_scalarrayint::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef spacing, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_scalarrayarray::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? 
dim=None, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList spacing, c10::optional dim=c10::nullopt, int64_t edge_order=1) { + return at::_ops::gradient_tensorarrayint::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_tensorarray::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::div.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::div_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & div_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::div__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::div_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::div_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor + inline at::Tensor div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + return at::_ops::div_Tensor_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) + inline at::Tensor & div_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + return at::_ops::div__Tensor_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + return at::_ops::div_out_mode::redispatch(dispatchKeySet, self, other, rounding_mode, out); + } + + // aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode, at::Tensor & out) { + return at::_ops::div_out_mode::redispatch(dispatchKeySet, self, other, rounding_mode, out); + } + + // aten::div.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::div_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ inline at::Tensor & div_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::div__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor + inline at::Tensor div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode) { + return at::_ops::div_Scalar_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) + inline at::Tensor & div_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode) { + return at::_ops::div__Scalar_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::divide.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::divide_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::divide__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::divide_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::divide_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::divide.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::divide_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::divide__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor + inline at::Tensor divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + return at::_ops::divide_Tensor_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) + inline at::Tensor & divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + return at::_ops::divide__Tensor_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) 
+    inline at::Tensor & divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
+        return at::_ops::divide_out_mode::redispatch(dispatchKeySet, self, other, rounding_mode, out);
+    }
+
+    // aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
+        return at::_ops::divide_out_mode::redispatch(dispatchKeySet, self, other, rounding_mode, out);
+    }
+
+    // aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
+    inline at::Tensor divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
+        return at::_ops::divide_Scalar_mode::redispatch(dispatchKeySet, self, other, rounding_mode);
+    }
+
+    // aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
+    inline at::Tensor & divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
+        return at::_ops::divide__Scalar_mode::redispatch(dispatchKeySet, self, other, rounding_mode);
+    }
+
+    // aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor true_divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::true_divide_Tensor::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+    inline at::Tensor & true_divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::true_divide__Tensor::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & true_divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::true_divide_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & true_divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::true_divide_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
+    inline at::Tensor true_divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::true_divide_Scalar::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+    inline at::Tensor & true_divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::true_divide__Scalar::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::dot(Tensor self, Tensor tensor) -> Tensor
+    inline at::Tensor dot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor) {
+        return at::_ops::dot::redispatch(dispatchKeySet, self, tensor);
+    }
+
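+    // NOTE: a hedged usage sketch for the division and dot wrappers above, written against the
+    // public at:: API (the tensor names a, b are illustrative only):
+    //
+    //   at::Tensor a = at::arange(1, 7, at::kFloat);               // [1, 2, 3, 4, 5, 6]
+    //   at::Tensor b = at::full({6}, 4.0);
+    //   at::Tensor q = at::div(a, b, /*rounding_mode=*/"floor");   // aten::div.Tensor_mode
+    //   at::Tensor r = at::true_divide(a, b);                      // always floating-point division
+    //   at::Tensor d = at::dot(a, b);                              // aten::dot, 1-D inputs only
+
+    // aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)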
+ inline at::Tensor & dot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor) { + return at::_ops::dot_out::redispatch(dispatchKeySet, self, tensor, out); + } + + // aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) { + return at::_ops::dot_out::redispatch(dispatchKeySet, self, tensor, out); + } + + // aten::vdot(Tensor self, Tensor other) -> Tensor + inline at::Tensor vdot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::vdot::redispatch(dispatchKeySet, self, other); + } + + // aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & vdot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::vdot_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & vdot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::vdot_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor + inline at::Tensor einsum(c10::DispatchKeySet dispatchKeySet, c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path=c10::nullopt) { + return at::_ops::einsum::redispatch(dispatchKeySet, equation, tensors, path); + } + + // aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor + inline at::Tensor embedding(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) { + return at::_ops::embedding::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse); + } + + // aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor + inline at::Tensor embedding_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) { + return at::_ops::embedding::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse); + } + + // aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor + inline at::Tensor embedding_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) { + return at::_ops::embedding_backward::redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse); + } + + // aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor + inline at::Tensor embedding_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) { + return at::_ops::embedding_backward::redispatch(dispatchKeySet, grad, indices, 
num_weights, padding_idx, scale_grad_by_freq, sparse);
+    }
+
+    // aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
+    inline at::Tensor embedding_dense_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
+        return at::_ops::embedding_dense_backward::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
+    }
+
+    // aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
+    inline at::Tensor embedding_dense_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
+        return at::_ops::embedding_dense_backward::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
+    }
+
+    // aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
+    inline at::Tensor & embedding_renorm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
+        return at::_ops::embedding_renorm_::redispatch(dispatchKeySet, self, indices, max_norm, norm_type);
+    }
+
+    // aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
+    inline at::Tensor embedding_sparse_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
+        return at::_ops::embedding_sparse_backward::redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq);
+    }
+
+    // aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) {
+        return at::_ops::_embedding_bag_forward_only::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
+    }
+
+    // aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _rowwise_prune(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
+        return at::_ops::_rowwise_prune::redispatch(dispatchKeySet, weight, mask, compressed_indices_dtype);
+    }
+
+    // aten::row_stack(Tensor[] tensors) -> Tensor
+    inline at::Tensor row_stack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+        return at::_ops::row_stack::redispatch(dispatchKeySet, tensors);
+    }
+
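+    // NOTE: a hedged sketch of how the embedding_bag family above is normally exercised through
+    // the public at:: API (tensor names are illustrative, CPU build assumed):
+    //
+    //   at::Tensor weight  = at::randn({10, 3});                   // 10 embeddings of width 3
+    //   at::Tensor indices = at::arange(6, at::kLong);
+    //   at::Tensor offsets = at::arange(0, 6, 2, at::kLong);       // 3 bags of 2 indices each
+    //   auto bags = at::embedding_bag(weight, indices, offsets);   // (output, offset2bag, bag_size, max_indices)
+    //   at::Tensor pooled  = std::get<0>(bags);
+    //   at::Tensor stacked = at::row_stack({pooled, pooled});      // aten::row_stack
+
+    // aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)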
+ inline at::Tensor & row_stack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::row_stack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & row_stack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::row_stack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple embedding_bag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional & per_sample_weights={}, bool include_last_offset=false) { + return at::_ops::embedding_bag::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset); + } + + // aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple embedding_bag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, c10::optional padding_idx) { + return at::_ops::embedding_bag_padding_idx::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); + } + + // aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple _embedding_bag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); + } + + // aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_backward::redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_backward::redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_sparse_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_sparse_backward::redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_sparse_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_sparse_backward::redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_dense_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_dense_backward::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_dense_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_dense_backward::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_per_sample_weights_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_per_sample_weights_backward::redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx); + } + + // aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::empty_names::redispatch(dispatchKeySet, size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::empty_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor empty(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::empty_memory_format::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::empty_memory_format::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::empty_memory_format::redispatch(dispatchKeySet, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::empty_memory_format::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_permuted(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options={}) { + return at::_ops::empty_permuted::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), physical_layout, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_permuted(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef physical_layout, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::empty_permuted::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), physical_layout, dtype, layout, device, pin_memory); + } + + // aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor empty_permuted_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options={}) { + return at::_ops::empty_permuted::redispatch(dispatchKeySet, size, physical_layout, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_permuted_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::IntArrayRef physical_layout, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::empty_permuted::redispatch(dispatchKeySet, size, physical_layout, dtype, layout, device, pin_memory); + } + + // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_empty::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::new_empty::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_empty::redispatch(dispatchKeySet, self, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::new_empty::redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory); + } + + // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor new_empty_strided(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) { + return at::_ops::new_empty_strided::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_strided(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::new_empty_strided::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory); + } + + // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_strided_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) { + return at::_ops::new_empty_strided::redispatch(dispatchKeySet, self, size, stride, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_strided_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::new_empty_strided::redispatch(dispatchKeySet, self, size, stride, dtype, layout, device, pin_memory); + } + + // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_full(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::new_full::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_full(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::new_full::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory); + } + + // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor new_full_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::new_full::redispatch(dispatchKeySet, self, size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_full_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::new_full::redispatch(dispatchKeySet, self, size, fill_value, dtype, layout, device, pin_memory); + } + + // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_zeros(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_zeros::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_zeros(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::new_zeros::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_zeros_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_zeros::redispatch(dispatchKeySet, self, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_zeros_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::new_zeros::redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory); + } + + // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_ones(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_ones::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? 
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_ones(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_ones::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_ones_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_ones::redispatch(dispatchKeySet, self, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_ones_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_ones::redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory); + } + + // aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_affine_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_affine_quantized::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_affine_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::_empty_affine_quantized::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, scale, zero_point, memory_format); + } + + // aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? 
memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_affine_quantized_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_affine_quantized::redispatch(dispatchKeySet, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_affine_quantized_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::_empty_affine_quantized::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory, scale, zero_point, memory_format); + } + + // aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_per_channel_affine_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_per_channel_affine_quantized::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), scales, zero_points, axis, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_per_channel_affine_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::_empty_per_channel_affine_quantized::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format); + } + + // aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_per_channel_affine_quantized::redispatch(dispatchKeySet, size, scales, zero_points, axis, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::_empty_per_channel_affine_quantized::redispatch(dispatchKeySet, size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format); + } + + // aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) + inline const at::Tensor & resize_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::resize_::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), memory_format); + } + + // aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) + inline const at::Tensor & resize__symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::resize_::redispatch(dispatchKeySet, self, size, memory_format); + } + + // aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!) + inline const at::Tensor & _resize_output_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device) { + return at::_ops::_resize_output_::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), device); + } + + // aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!) + inline const at::Tensor & _resize_output__symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) { + return at::_ops::_resize_output_::redispatch(dispatchKeySet, self, size, device); + } + + // aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor empty_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_quantized::redispatch(dispatchKeySet, size, qtensor, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::empty_quantized::redispatch(dispatchKeySet, size, qtensor, dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), memory_format, out); + } + + // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { + return at::_ops::empty_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), memory_format, out); + } + + // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_out::redispatch(dispatchKeySet, size, memory_format, out); + } + + // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { + return at::_ops::empty_out::redispatch(dispatchKeySet, size, memory_format, out); + } + + // aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_like::redispatch(dispatchKeySet, self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor empty_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::empty_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_strided(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) { + return at::_ops::empty_strided::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_strided(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::empty_strided::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory); + } + + // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_strided_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) { + return at::_ops::empty_strided::redispatch(dispatchKeySet, size, stride, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_strided_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::empty_strided::redispatch(dispatchKeySet, size, stride, dtype, layout, device, pin_memory); + } + + // aten::erf(Tensor self) -> Tensor + inline at::Tensor erf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::erf::redispatch(dispatchKeySet, self); + } + + // aten::erf_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & erf_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::erf_::redispatch(dispatchKeySet, self); + } + + // aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
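A brief illustrative aside (my sketch, not text from the generated header): each wrapper above simply forwards an explicit c10::DispatchKeySet to the corresponding at::_ops::*::redispatch call instead of letting the dispatcher recompute the key set. Assuming these declarations live in the at::redispatch namespace, as in ATen's generated RedispatchFunctions.h, and assuming the caller's key set already has the currently-handled key masked out, a custom kernel might use the erf family like this (erf_then_double is a hypothetical name):

  #include <ATen/ATen.h>
  #include <ATen/RedispatchFunctions.h>

  // Hypothetical kernel: compute erf through the remaining dispatch keys, then scale.
  at::Tensor erf_then_double(c10::DispatchKeySet ks, const at::Tensor & self) {
    at::Tensor out = at::empty_like(self);    // result buffer, same dtype/device as self
    at::redispatch::erf_outf(ks, self, out);  // aten::erf.out, dispatched with the explicit key set
    return out.mul_(2.0);                     // arbitrary post-processing
  }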
+ inline at::Tensor & erf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::erfc(Tensor self) -> Tensor + inline at::Tensor erfc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::erfc::redispatch(dispatchKeySet, self); + } + + // aten::erfc_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & erfc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::erfc_::redispatch(dispatchKeySet, self); + } + + // aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erfc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erfc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::exp(Tensor self) -> Tensor + inline at::Tensor exp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::exp::redispatch(dispatchKeySet, self); + } + + // aten::exp_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & exp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::exp_::redispatch(dispatchKeySet, self); + } + + // aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::exp2(Tensor self) -> Tensor + inline at::Tensor exp2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::exp2::redispatch(dispatchKeySet, self); + } + + // aten::exp2_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & exp2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::exp2_::redispatch(dispatchKeySet, self); + } + + // aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exp2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::exp2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exp2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::exp2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::expm1(Tensor self) -> Tensor + inline at::Tensor expm1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::expm1::redispatch(dispatchKeySet, self); + } + + // aten::expm1_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & expm1_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::expm1_::redispatch(dispatchKeySet, self); + } + + // aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & expm1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & expm1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) + inline at::Tensor expand(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, bool implicit=false) { + return at::_ops::expand::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), implicit); + } + + // aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) + inline at::Tensor expand_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) { + return at::_ops::expand::redispatch(dispatchKeySet, self, size, implicit); + } + + // aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a) + inline at::Tensor expand_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::expand_as::redispatch(dispatchKeySet, self, other); + } + + // aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye(c10::DispatchKeySet dispatchKeySet, int64_t n, at::TensorOptions options={}) { + return at::_ops::eye::redispatch(dispatchKeySet, n, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::eye::redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory); + } + + // aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::TensorOptions options={}) { + return at::_ops::eye::redispatch(dispatchKeySet, n, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::eye::redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory); + } + + // aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye(c10::DispatchKeySet dispatchKeySet, int64_t n, int64_t m, at::TensorOptions options={}) { + return at::_ops::eye_m::redispatch(dispatchKeySet, n, m, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor eye(c10::DispatchKeySet dispatchKeySet, int64_t n, int64_t m, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::eye_m::redispatch(dispatchKeySet, n, m, dtype, layout, device, pin_memory); + } + + // aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, at::TensorOptions options={}) { + return at::_ops::eye_m::redispatch(dispatchKeySet, n, m, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::eye_m::redispatch(dispatchKeySet, n, m, dtype, layout, device, pin_memory); + } + + // aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n) { + return at::_ops::eye_out::redispatch(dispatchKeySet, n, out); + } + + // aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, at::Tensor & out) { + return at::_ops::eye_out::redispatch(dispatchKeySet, n, out); + } + + // aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymInt n) { + return at::_ops::eye_out::redispatch(dispatchKeySet, n, out); + } + + // aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out) { + return at::_ops::eye_out::redispatch(dispatchKeySet, n, out); + } + + // aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, int64_t m) { + return at::_ops::eye_m_out::redispatch(dispatchKeySet, n, m, out); + } + + // aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, int64_t m, at::Tensor & out) { + return at::_ops::eye_m_out::redispatch(dispatchKeySet, n, m, out); + } + + // aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymInt n, c10::SymInt m) { + return at::_ops::eye_m_out::redispatch(dispatchKeySet, n, m, out); + } + + // aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) 
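One more illustrative note (a sketch under the same assumptions, not part of the header): every factory op in this block, eye included, is emitted in two flavours, one taking a packed at::TensorOptions that the wrapper unpacks via options.dtype_opt()/layout_opt()/device_opt()/pinned_memory_opt(), and one taking the already-unpacked c10::optional arguments. The two calls below should therefore reach the same at::_ops::eye_m::redispatch entry (eye_two_ways is a hypothetical helper):

  // Hypothetical helper contrasting the two generated overload styles.
  at::Tensor eye_two_ways(c10::DispatchKeySet ks) {
    // packed TensorOptions overload
    at::Tensor a = at::redispatch::eye(ks, 3, 3, at::TensorOptions().dtype(at::kFloat));
    // unpacked-optionals overload (what the TensorOptions overload forwards to)
    at::Tensor b = at::redispatch::eye(ks, 3, 3, at::ScalarType::Float,
                                       c10::nullopt, c10::nullopt, c10::nullopt);
    return a + b; // both are 3x3 float identity matrices
  }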
+ inline at::Tensor & eye_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, at::Tensor & out) { + return at::_ops::eye_m_out::redispatch(dispatchKeySet, n, m, out); + } + + // aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) + inline at::Tensor flatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t start_dim=0, int64_t end_dim=-1) { + return at::_ops::flatten_using_ints::redispatch(dispatchKeySet, self, start_dim, end_dim); + } + + // aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) + inline at::Tensor flatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) { + return at::_ops::flatten_named_out_dim::redispatch(dispatchKeySet, self, start_dim, end_dim, out_dim); + } + + // aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) + inline at::Tensor flatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) { + return at::_ops::flatten_using_names::redispatch(dispatchKeySet, self, start_dim, end_dim, out_dim); + } + + // aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) + inline at::Tensor flatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) { + return at::_ops::flatten_DimnameList::redispatch(dispatchKeySet, self, dims, out_dim); + } + + // aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) + inline at::Tensor unflatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) { + return at::_ops::unflatten_int::redispatch(dispatchKeySet, self, dim, c10::fromIntArrayRefSlow(sizes)); + } + + // aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) + inline at::Tensor unflatten_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) { + return at::_ops::unflatten_int::redispatch(dispatchKeySet, self, dim, sizes); + } + + // aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) + inline at::Tensor unflatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) { + return at::_ops::unflatten_Dimname::redispatch(dispatchKeySet, self, dim, c10::fromIntArrayRefSlow(sizes), names); + } + + // aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) + inline at::Tensor unflatten_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) { + return at::_ops::unflatten_Dimname::redispatch(dispatchKeySet, self, dim, sizes, names); + } + + // aten::fill.Scalar(Tensor self, Scalar value) -> Tensor + inline at::Tensor fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value) { + return at::_ops::fill_Scalar::redispatch(dispatchKeySet, self, value); + } + + // aten::fill.Tensor(Tensor self, Tensor value) -> Tensor + inline at::Tensor fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value) { + return at::_ops::fill_Tensor::redispatch(dispatchKeySet, self, value); + } + + // aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) 
+ inline at::Tensor & fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & value) { + return at::_ops::fill__Scalar::redispatch(dispatchKeySet, self, value); + } + + // aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) + inline at::Tensor & fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value) { + return at::_ops::fill__Tensor::redispatch(dispatchKeySet, self, value); + } + + // aten::floor(Tensor self) -> Tensor + inline at::Tensor floor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::floor::redispatch(dispatchKeySet, self); + } + + // aten::floor_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & floor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::floor_::redispatch(dispatchKeySet, self); + } + + // aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & floor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::floor_out::redispatch(dispatchKeySet, self, out); + } + + // aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & floor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::floor_out::redispatch(dispatchKeySet, self, out); + } + + // aten::floor_divide(Tensor self, Tensor other) -> Tensor + inline at::Tensor floor_divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::floor_divide::redispatch(dispatchKeySet, self, other); + } + + // aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & floor_divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::floor_divide__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & floor_divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::floor_divide_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & floor_divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::floor_divide_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor floor_divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::floor_divide_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & floor_divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::floor_divide__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::frac(Tensor self) -> Tensor + inline at::Tensor frac(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::frac::redispatch(dispatchKeySet, self); + } + + // aten::frac_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & frac_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::frac_::redispatch(dispatchKeySet, self); + } + + // aten::frac.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & frac_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::frac_out::redispatch(dispatchKeySet, self, out); + } + + // aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & frac_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::frac_out::redispatch(dispatchKeySet, self, out); + } + + // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor full(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional names, at::TensorOptions options={}) { + return at::_ops::full_names::redispatch(dispatchKeySet, size, fill_value, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor full(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::full_names::redispatch(dispatchKeySet, size, fill_value, names, dtype, layout, device, pin_memory); + } + + // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor full(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::full::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor full(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::full::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory); + } + + // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor full_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::full::redispatch(dispatchKeySet, size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor full_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::full::redispatch(dispatchKeySet, size, fill_value, dtype, layout, device, pin_memory); + } + + // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & full_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) { + return at::_ops::full_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), fill_value, out); + } + + // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & full_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { + return at::_ops::full_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), fill_value, out); + } + + // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & full_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) { + return at::_ops::full_out::redispatch(dispatchKeySet, size, fill_value, out); + } + + // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & full_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { + return at::_ops::full_out::redispatch(dispatchKeySet, size, fill_value, out); + } + + // aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor full_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::full_like::redispatch(dispatchKeySet, self, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor full_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::full_like::redispatch(dispatchKeySet, self, fill_value, dtype, layout, device, pin_memory, memory_format); + } + + // aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor from_file(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional shared=c10::nullopt, c10::optional size=0, at::TensorOptions options={}) { + return at::_ops::from_file::redispatch(dispatchKeySet, filename, shared, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::from_file(str filename, bool? shared=None, int? 
size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor from_file(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional shared, c10::optional size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::from_file::redispatch(dispatchKeySet, filename, shared, size, dtype, layout, device, pin_memory); + } + + // aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gcd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gcd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::gcd_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gcd(Tensor self, Tensor other) -> Tensor + inline at::Tensor gcd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd::redispatch(dispatchKeySet, self, other); + } + + // aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & gcd_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd_::redispatch(dispatchKeySet, self, other); + } + + // aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lcm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lcm_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lcm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::lcm_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lcm(Tensor self, Tensor other) -> Tensor + inline at::Tensor lcm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lcm::redispatch(dispatchKeySet, self, other); + } + + // aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+ inline at::Tensor & lcm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::lcm_::redispatch(dispatchKeySet, self, other); + } + + // aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + inline at::Tensor grid_sampler(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners); + } + + // aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + inline at::Tensor grid_sampler_2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_2d::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners); + } + + // aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) { + return at::_ops::grid_sampler_2d_backward::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); + } + + // aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + inline at::Tensor _grid_sampler_2d_cpu_fallback(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::_grid_sampler_2d_cpu_fallback::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners); + } + + // aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::_grid_sampler_2d_cpu_fallback_backward::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners); + } + + // aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + inline at::Tensor grid_sampler_3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_3d::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners); + } + + // aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const 
at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + return at::_ops::grid_sampler_3d_backward::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); + } + + // aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hann_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) { + return at::_ops::hann_window::redispatch(dispatchKeySet, window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hann_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hann_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory); + } + + // aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hann_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::hann_window_periodic::redispatch(dispatchKeySet, window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hann_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hann_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory); + } + + // aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) { + return at::_ops::hamming_window::redispatch(dispatchKeySet, window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hamming_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory); + } + + // aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic::redispatch(dispatchKeySet, window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hamming_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory); + } + + // aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic_alpha::redispatch(dispatchKeySet, window_length, periodic, alpha, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hamming_window_periodic_alpha::redispatch(dispatchKeySet, window_length, periodic, alpha, dtype, layout, device, pin_memory); + } + + // aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic_alpha_beta::redispatch(dispatchKeySet, window_length, periodic, alpha, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hamming_window_periodic_alpha_beta::redispatch(dispatchKeySet, window_length, periodic, alpha, beta, dtype, layout, device, pin_memory); + } + + // aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) { + return at::_ops::kaiser_window::redispatch(dispatchKeySet, window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::kaiser_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory); + } + + // aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::kaiser_window_periodic::redispatch(dispatchKeySet, window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::kaiser_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory); + } + + // aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) { + return at::_ops::kaiser_window_beta::redispatch(dispatchKeySet, window_length, periodic, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::kaiser_window_beta::redispatch(dispatchKeySet, window_length, periodic, beta, dtype, layout, device, pin_memory); + } + + // aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor + inline at::Tensor hinge_embedding_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean) { + return at::_ops::hinge_embedding_loss::redispatch(dispatchKeySet, self, target, margin, reduction); + } + + // aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? 
bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor + inline at::Tensor group_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enabled=true) { + return at::_ops::group_norm::redispatch(dispatchKeySet, input, num_groups, weight, bias, eps, cudnn_enabled); + } + + // aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) { + return at::_ops::native_group_norm::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps); + } + + // aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) { + return at::_ops::native_group_norm::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps); + } + + // aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) { + return at::_ops::native_group_norm_backward::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask); + } + + // aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) { + return at::_ops::native_group_norm_backward::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask); + } + + // aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor + inline at::Tensor _fft_r2c(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) { + return at::_ops::_fft_r2c::redispatch(dispatchKeySet, self, dim, normalization, onesided); + } + + // aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _fft_r2c_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) { + return at::_ops::_fft_r2c_out::redispatch(dispatchKeySet, self, dim, normalization, onesided, out); + } + + // aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fft_r2c_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) { + return at::_ops::_fft_r2c_out::redispatch(dispatchKeySet, self, dim, normalization, onesided, out); + } + + // aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor + inline at::Tensor _fft_c2r(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) { + return at::_ops::_fft_c2r::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size); + } + + // aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor + inline at::Tensor _fft_c2r_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) { + return at::_ops::_fft_c2r::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size); + } + + // aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fft_c2r_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) { + return at::_ops::_fft_c2r_out::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size, out); + } + + // aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fft_c2r_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) { + return at::_ops::_fft_c2r_out::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size, out); + } + + // aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fft_c2r_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) { + return at::_ops::_fft_c2r_out::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size, out); + } + + // aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _fft_c2r_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, at::Tensor & out) { + return at::_ops::_fft_c2r_out::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size, out); + } + + // aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor + inline at::Tensor _fft_c2c(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(dim), normalization, forward); + } + + // aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor + inline at::Tensor _fft_c2c_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c::redispatch(dispatchKeySet, self, dim, normalization, forward); + } + + // aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fft_c2c_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(dim), normalization, forward, out); + } + + // aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fft_c2c_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) { + return at::_ops::_fft_c2c_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(dim), normalization, forward, out); + } + + // aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fft_c2c_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c_out::redispatch(dispatchKeySet, self, dim, normalization, forward, out); + } + + // aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _fft_c2c_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) { + return at::_ops::_fft_c2c_out::redispatch(dispatchKeySet, self, dim, normalization, forward, out); + } + + // aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> () + inline void _validate_compressed_sparse_indices(c10::DispatchKeySet dispatchKeySet, bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) { + return at::_ops::_validate_compressed_sparse_indices::redispatch(dispatchKeySet, is_crow, compressed_idx, plain_idx, cdim, dim, nnz); + } + + // aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int + inline int64_t _cufft_get_plan_cache_size(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index) { + return at::_ops::_cufft_get_plan_cache_size::redispatch(dispatchKeySet, device_index); + } + + // aten::_cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int + inline int64_t _cufft_get_plan_cache_max_size(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index) { + return at::_ops::_cufft_get_plan_cache_max_size::redispatch(dispatchKeySet, device_index); + } + + // aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> () + inline void _cufft_set_plan_cache_max_size(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index, int64_t max_size) { + return at::_ops::_cufft_set_plan_cache_max_size::redispatch(dispatchKeySet, device_index, max_size); + } + + // aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> () + inline void _cufft_clear_plan_cache(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index) { + return at::_ops::_cufft_clear_plan_cache::redispatch(dispatchKeySet, device_index); + } + + // aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor + inline at::Tensor index(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List> & indices) { + return at::_ops::index_Tensor::redispatch(dispatchKeySet, self, indices); + } + + // aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::List> & indices) { + return at::_ops::index_Tensor_out::redispatch(dispatchKeySet, self, indices, out); + } + + // aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List> & indices, at::Tensor & out) { + return at::_ops::index_Tensor_out::redispatch(dispatchKeySet, self, indices, out); + } + + // aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor + inline at::Tensor _unsafe_index(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List> & indices) { + return at::_ops::_unsafe_index_Tensor::redispatch(dispatchKeySet, self, indices); + } + + // aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & index_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy_out::redispatch(dispatchKeySet, self, dim, index, source, out); + } + + // aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) { + return at::_ops::index_copy_out::redispatch(dispatchKeySet, self, dim, index, source, out); + } + + // aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) + inline at::Tensor & index_copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy_::redispatch(dispatchKeySet, self, dim, index, source); + } + + // aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor + inline at::Tensor index_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy::redispatch(dispatchKeySet, self, dim, index, source); + } + + // aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!) + inline at::Tensor & index_copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy__dimname::redispatch(dispatchKeySet, self, dim, index, source); + } + + // aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor + inline at::Tensor index_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy_dimname::redispatch(dispatchKeySet, self, dim, index, source); + } + + // aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) + inline at::Tensor & index_put_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::index_put_::redispatch(dispatchKeySet, self, indices, values, accumulate); + } + + // aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor + inline at::Tensor index_put(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::index_put::redispatch(dispatchKeySet, self, indices, values, accumulate); + } + + // aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor + inline at::Tensor _unsafe_index_put(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::_unsafe_index_put::redispatch(dispatchKeySet, self, indices, values, accumulate); + } + + // aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!) 
+ inline at::Tensor & _index_put_impl_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) { + return at::_ops::_index_put_impl_::redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe); + } + + // aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor + inline at::Tensor instance_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { + return at::_ops::instance_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); + } + + // aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor + inline at::Tensor isclose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) { + return at::_ops::isclose::redispatch(dispatchKeySet, self, other, rtol, atol, equal_nan); + } + + // aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Tensor_out::redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert, out); + } + + // aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Tensor_Tensor_out::redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert, out); + } + + // aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor + inline at::Tensor isin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Tensor::redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert); + } + + // aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Scalar_out::redispatch(dispatchKeySet, elements, test_element, assume_unique, invert, out); + } + + // aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & isin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Tensor_Scalar_out::redispatch(dispatchKeySet, elements, test_element, assume_unique, invert, out); + } + + // aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor + inline at::Tensor isin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Scalar::redispatch(dispatchKeySet, elements, test_element, assume_unique, invert); + } + + // aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Scalar_Tensor_out::redispatch(dispatchKeySet, element, test_elements, assume_unique, invert, out); + } + + // aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isin_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Scalar_Tensor_out::redispatch(dispatchKeySet, element, test_elements, assume_unique, invert, out); + } + + // aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor + inline at::Tensor isin(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Scalar_Tensor::redispatch(dispatchKeySet, element, test_elements, assume_unique, invert); + } + + // aten::isnan(Tensor self) -> Tensor + inline at::Tensor isnan(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isnan::redispatch(dispatchKeySet, self); + } + + // aten::is_distributed(Tensor self) -> bool + inline bool is_distributed(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_distributed::redispatch(dispatchKeySet, self); + } + + // aten::is_floating_point(Tensor self) -> bool + inline bool __dispatch_is_floating_point(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_floating_point::redispatch(dispatchKeySet, self); + } + + // aten::is_complex(Tensor self) -> bool + inline bool __dispatch_is_complex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_complex::redispatch(dispatchKeySet, self); + } + + // aten::is_conj(Tensor self) -> bool + inline bool __dispatch_is_conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_conj::redispatch(dispatchKeySet, self); + } + + // aten::_is_zerotensor(Tensor self) -> bool + inline bool __dispatch__is_zerotensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_is_zerotensor::redispatch(dispatchKeySet, self); + } + + // aten::is_neg(Tensor self) -> bool + inline bool __dispatch_is_neg(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_neg::redispatch(dispatchKeySet, 
self); + } + + // aten::isreal(Tensor self) -> Tensor + inline at::Tensor isreal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isreal::redispatch(dispatchKeySet, self); + } + + // aten::is_nonzero(Tensor self) -> bool + inline bool is_nonzero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_nonzero::redispatch(dispatchKeySet, self); + } + + // aten::is_same_size(Tensor self, Tensor other) -> bool + inline bool is_same_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::is_same_size::redispatch(dispatchKeySet, self, other); + } + + // aten::is_signed(Tensor self) -> bool + inline bool __dispatch_is_signed(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_signed::redispatch(dispatchKeySet, self); + } + + // aten::is_inference(Tensor self) -> bool + inline bool __dispatch_is_inference(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_inference::redispatch(dispatchKeySet, self); + } + + // aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor + inline at::Tensor kl_div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false) { + return at::_ops::kl_div::redispatch(dispatchKeySet, self, target, reduction, log_target); + } + + // aten::kron(Tensor self, Tensor other) -> Tensor + inline at::Tensor kron(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::kron::redispatch(dispatchKeySet, self, other); + } + + // aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & kron_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::kron_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & kron_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::kron_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple kthvalue(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue::redispatch(dispatchKeySet, self, k, dim, keepdim); + } + + // aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple kthvalue_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue_values::redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices); + } + + // aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + inline ::std::tuple kthvalue_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_values::redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices); + } + + // aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple kthvalue(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname::redispatch(dispatchKeySet, self, k, dim, keepdim); + } + + // aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple kthvalue_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname_out::redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices); + } + + // aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple kthvalue_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_dimname_out::redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices); + } + + // aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor + inline at::Tensor layer_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional & weight={}, const c10::optional & bias={}, double eps=1e-05, bool cudnn_enable=true) { + return at::_ops::layer_norm::redispatch(dispatchKeySet, input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable); + } + + // aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor + inline at::Tensor layer_norm_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight={}, const c10::optional & bias={}, double eps=1e-05, bool cudnn_enable=true) { + return at::_ops::layer_norm::redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, cudnn_enable); + } + + // aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) + inline ::std::tuple native_layer_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps) { + return at::_ops::native_layer_norm::redispatch(dispatchKeySet, input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps); + } + + // aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? 
bias, float eps) -> (Tensor, Tensor, Tensor) + inline ::std::tuple native_layer_norm_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps) { + return at::_ops::native_layer_norm::redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps); + } + + // aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple native_layer_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward::redispatch(dispatchKeySet, grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask); + } + + // aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple native_layer_norm_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward::redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask); + } + + // aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor + inline at::Tensor nan_to_num(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt) { + return at::_ops::nan_to_num::redispatch(dispatchKeySet, self, nan, posinf, neginf); + } + + // aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!) + inline at::Tensor & nan_to_num_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt) { + return at::_ops::nan_to_num_::redispatch(dispatchKeySet, self, nan, posinf, neginf); + } + + // aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nan_to_num_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt) { + return at::_ops::nan_to_num_out::redispatch(dispatchKeySet, self, nan, posinf, neginf, out); + } + + // aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nan_to_num_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf, at::Tensor & out) { + return at::_ops::nan_to_num_out::redispatch(dispatchKeySet, self, nan, posinf, neginf, out); + } + + // aten::linear(Tensor input, Tensor weight, Tensor? 
bias=None) -> Tensor + inline at::Tensor linear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}) { + return at::_ops::linear::redispatch(dispatchKeySet, input, weight, bias); + } + + // aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple linear_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask) { + return at::_ops::linear_backward::redispatch(dispatchKeySet, self, grad_output, weight, output_mask); + } + + // aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linear_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}) { + return at::_ops::linear_out::redispatch(dispatchKeySet, input, weight, bias, out); + } + + // aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linear_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::Tensor & out) { + return at::_ops::linear_out::redispatch(dispatchKeySet, input, weight, bias, out); + } + + // aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor + inline at::Tensor mkldnn_linear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias={}) { + return at::_ops::mkldnn_linear::redispatch(dispatchKeySet, self, weight, bias); + } + + // aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor + inline at::Tensor mkldnn_linear_backward_input(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) { + return at::_ops::mkldnn_linear_backward_input::redispatch(dispatchKeySet, input_size, grad_output, weight); + } + + // aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor) + inline ::std::tuple mkldnn_linear_backward_weights(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) { + return at::_ops::mkldnn_linear_backward_weights::redispatch(dispatchKeySet, grad_output, input, weight, bias_defined); + } + + // aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple mkldnn_linear_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask) { + return at::_ops::mkldnn_linear_backward::redispatch(dispatchKeySet, self, grad_output, weight, output_mask); + } + + // aten::_cslt_compress(Tensor input) -> Tensor + inline at::Tensor _cslt_compress(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) { + return at::_ops::_cslt_compress::redispatch(dispatchKeySet, input); + } + + // aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? 
out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor + inline at::Tensor _cslt_sparse_mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional & bias={}, const c10::optional & alpha={}, c10::optional out_dtype=c10::nullopt, bool transpose_result=false, int64_t alg_id=0) { + return at::_ops::_cslt_sparse_mm::redispatch(dispatchKeySet, compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id); + } + + // aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int + inline int64_t _cslt_sparse_mm_search(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional & bias={}, const c10::optional & alpha={}, c10::optional out_dtype=c10::nullopt, bool transpose_result=false) { + return at::_ops::_cslt_sparse_mm_search::redispatch(dispatchKeySet, compressed_A, dense_B, bias, alpha, out_dtype, transpose_result); + } + + // aten::_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor + inline at::Tensor _sparse_semi_structured_linear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const c10::optional & bias={}, c10::optional activation=c10::nullopt, c10::optional out_dtype=c10::nullopt) { + return at::_ops::_sparse_semi_structured_linear::redispatch(dispatchKeySet, input, weight, meta, bias, activation, out_dtype); + } + + // aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? activation=None) -> Tensor + inline at::Tensor _mixed_dtypes_linear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & scale, const c10::optional & bias={}, c10::optional activation=c10::nullopt) { + return at::_ops::_mixed_dtypes_linear::redispatch(dispatchKeySet, input, weight, scale, bias, activation); + } + + // aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + inline at::Tensor fbgemm_linear_int8_weight_fp32_activation(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) { + return at::_ops::fbgemm_linear_int8_weight_fp32_activation::redispatch(dispatchKeySet, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); + } + + // aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + inline at::Tensor fbgemm_linear_int8_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) { + return at::_ops::fbgemm_linear_int8_weight::redispatch(dispatchKeySet, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); + } + + // aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) + inline ::std::tuple 
fbgemm_linear_quantize_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) { + return at::_ops::fbgemm_linear_quantize_weight::redispatch(dispatchKeySet, input); + } + + // aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor + inline at::Tensor fbgemm_pack_gemm_matrix_fp16(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) { + return at::_ops::fbgemm_pack_gemm_matrix_fp16::redispatch(dispatchKeySet, input); + } + + // aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + inline at::Tensor fbgemm_linear_fp16_weight_fp32_activation(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) { + return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::redispatch(dispatchKeySet, input, packed_weight, bias); + } + + // aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + inline at::Tensor fbgemm_linear_fp16_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) { + return at::_ops::fbgemm_linear_fp16_weight::redispatch(dispatchKeySet, input, packed_weight, bias); + } + + // aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor + inline at::Tensor fbgemm_pack_quantized_matrix(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) { + return at::_ops::fbgemm_pack_quantized_matrix::redispatch(dispatchKeySet, input); + } + + // aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor + inline at::Tensor fbgemm_pack_quantized_matrix(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t K, int64_t N) { + return at::_ops::fbgemm_pack_quantized_matrix_KN::redispatch(dispatchKeySet, input, K, N); + } + + // aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor ldexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ldexp_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & ldexp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::ldexp_::redispatch(dispatchKeySet, self, other); + } + + // aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ldexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ldexp_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ldexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ldexp_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace::redispatch(dispatchKeySet, start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? 
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::linspace::redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory); + } + + // aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Tensor_Tensor::redispatch(dispatchKeySet, start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::linspace_Tensor_Tensor::redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory); + } + + // aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Tensor_Scalar::redispatch(dispatchKeySet, start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::linspace_Tensor_Scalar::redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory); + } + + // aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Scalar_Tensor::redispatch(dispatchKeySet, start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::linspace_Scalar_Tensor::redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory); + } + + // aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps) { + return at::_ops::linspace_out::redispatch(dispatchKeySet, start, end, steps, out); + } + + // aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_out::redispatch(dispatchKeySet, start, end, steps, out); + } + + // aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps) { + return at::_ops::linspace_Tensor_Tensor_out::redispatch(dispatchKeySet, start, end, steps, out); + } + + // aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Tensor_Tensor_out::redispatch(dispatchKeySet, start, end, steps, out); + } + + // aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps) { + return at::_ops::linspace_Tensor_Scalar_out::redispatch(dispatchKeySet, start, end, steps, out); + } + + // aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Tensor_Scalar_out::redispatch(dispatchKeySet, start, end, steps, out); + } + + // aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps) { + return at::_ops::linspace_Scalar_Tensor_out::redispatch(dispatchKeySet, start, end, steps, out); + } + + // aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Scalar_Tensor_out::redispatch(dispatchKeySet, start, end, steps, out); + } + + // aten::log(Tensor self) -> Tensor + inline at::Tensor log(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::log::redispatch(dispatchKeySet, self); + } + + // aten::log_(Tensor(a!) self) -> Tensor(a!) 
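+ // log, log10, log1p and log2 below all follow the same triple: a functional form,
+ // an in-place `*_` form that mutates `self`, and out/outf forms writing into `out`.
+ // Minimal sketch (placeholder tensor `t`, caller-provided keyset `ks`):
+ //   at::Tensor y = at::redispatch::log(ks, t);  // functional
+ //   at::redispatch::log_(ks, t);                // in place, returns a reference to `t`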
+ inline at::Tensor & log_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::log_::redispatch(dispatchKeySet, self); + } + + // aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::log_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::log_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log10(Tensor self) -> Tensor + inline at::Tensor log10(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::log10::redispatch(dispatchKeySet, self); + } + + // aten::log10_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & log10_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::log10_::redispatch(dispatchKeySet, self); + } + + // aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log10_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::log10_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log10_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::log10_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log1p(Tensor self) -> Tensor + inline at::Tensor log1p(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::log1p::redispatch(dispatchKeySet, self); + } + + // aten::log1p_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & log1p_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::log1p_::redispatch(dispatchKeySet, self); + } + + // aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log1p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log1p_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log2(Tensor self) -> Tensor + inline at::Tensor log2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::log2::redispatch(dispatchKeySet, self); + } + + // aten::log2_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & log2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::log2_::redispatch(dispatchKeySet, self); + } + + // aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::log2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::log2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & logaddexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logaddexp_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logaddexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logaddexp_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logaddexp(Tensor self, Tensor other) -> Tensor + inline at::Tensor logaddexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logaddexp::redispatch(dispatchKeySet, self, other); + } + + // aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logaddexp2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logaddexp2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logaddexp2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logaddexp2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logaddexp2(Tensor self, Tensor other) -> Tensor + inline at::Tensor logaddexp2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logaddexp2::redispatch(dispatchKeySet, self, other); + } + + // aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor xlogy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::xlogy_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor + inline at::Tensor xlogy(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::xlogy_Scalar_Self::redispatch(dispatchKeySet, self, other); + } + + // aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor + inline at::Tensor xlogy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::xlogy_Scalar_Other::redispatch(dispatchKeySet, self, other); + } + + // aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & xlogy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::xlogy__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & xlogy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::xlogy__Scalar_Other::redispatch(dispatchKeySet, self, other); + } + + // aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::xlogy_OutTensor::redispatch(dispatchKeySet, self, other, out); + } + + // aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::xlogy_OutTensor::redispatch(dispatchKeySet, self, other, out); + } + + // aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::xlogy_OutScalar_Self::redispatch(dispatchKeySet, self, other, out); + } + + // aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::xlogy_OutScalar_Self::redispatch(dispatchKeySet, self, other, out); + } + + // aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::xlogy_OutScalar_Other::redispatch(dispatchKeySet, self, other, out); + } + + // aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::xlogy_OutScalar_Other::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, at::TensorOptions options={}) { + return at::_ops::logspace::redispatch(dispatchKeySet, start, end, steps, base, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::logspace::redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory); + } + + // aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0, at::TensorOptions options={}) { + return at::_ops::logspace_Tensor_Tensor::redispatch(dispatchKeySet, start, end, steps, base, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::logspace_Tensor_Tensor::redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory); + } + + // aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0, at::TensorOptions options={}) { + return at::_ops::logspace_Tensor_Scalar::redispatch(dispatchKeySet, start, end, steps, base, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::logspace_Tensor_Scalar::redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory); + } + + // aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0, at::TensorOptions options={}) { + return at::_ops::logspace_Scalar_Tensor::redispatch(dispatchKeySet, start, end, steps, base, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::logspace_Scalar_Tensor::redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory); + } + + // aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0) { + return at::_ops::logspace_out::redispatch(dispatchKeySet, start, end, steps, base, out); + } + + // aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) 
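+ // The linspace/logspace factories above come in two shapes: one packs dtype, layout,
+ // device and pin_memory into an at::TensorOptions (unpacked again via
+ // c10::optTypeMetaToScalarType before redispatching), the other takes the four
+ // optionals explicitly. Sketch with a caller-provided keyset `ks`:
+ //   at::Tensor t = at::redispatch::logspace(ks, /*start=*/0, /*end=*/3, /*steps=*/4,
+ //                                           /*base=*/10.0, at::TensorOptions().dtype(at::kFloat));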
+ inline at::Tensor & logspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) { + return at::_ops::logspace_out::redispatch(dispatchKeySet, start, end, steps, base, out); + } + + // aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0) { + return at::_ops::logspace_Tensor_Tensor_out::redispatch(dispatchKeySet, start, end, steps, base, out); + } + + // aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) { + return at::_ops::logspace_Tensor_Tensor_out::redispatch(dispatchKeySet, start, end, steps, base, out); + } + + // aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0) { + return at::_ops::logspace_Tensor_Scalar_out::redispatch(dispatchKeySet, start, end, steps, base, out); + } + + // aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) { + return at::_ops::logspace_Tensor_Scalar_out::redispatch(dispatchKeySet, start, end, steps, base, out); + } + + // aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0) { + return at::_ops::logspace_Scalar_Tensor_out::redispatch(dispatchKeySet, start, end, steps, base, out); + } + + // aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) { + return at::_ops::logspace_Scalar_Tensor_out::redispatch(dispatchKeySet, start, end, steps, base, out); + } + + // aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + inline at::Tensor log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt) { + return at::_ops::log_softmax_int::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt) { + return at::_ops::log_softmax_int_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) 
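+ // log_softmax takes an optional dtype that casts the input before the operation
+ // (useful to run a half-precision tensor's softmax in float); the composite
+ // eventually bottoms out in _log_softmax(self, dim, half_to_float) below.
+ // Sketch (placeholder `logits`, caller-provided `ks`):
+ //   at::Tensor p = at::redispatch::log_softmax(ks, logits, /*dim=*/-1, at::kFloat);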
+ inline at::Tensor & log_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype, at::Tensor & out) { + return at::_ops::log_softmax_int_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt) { + return at::_ops::log_softmax_Dimname::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + inline at::Tensor _log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_log_softmax::redispatch(dispatchKeySet, self, dim, half_to_float); + } + + // aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _log_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_log_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _log_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { + return at::_ops::_log_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor + inline at::Tensor _log_softmax_backward_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) { + return at::_ops::_log_softmax_backward_data::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype); + } + + // aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _log_softmax_backward_data_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) { + return at::_ops::_log_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, out); + } + + // aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _log_softmax_backward_data_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) { + return at::_ops::_log_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, out); + } + + // aten::_logcumsumexp(Tensor self, int dim) -> Tensor + inline at::Tensor _logcumsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::_logcumsumexp::redispatch(dispatchKeySet, self, dim); + } + + // aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _logcumsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim) { + return at::_ops::_logcumsumexp_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _logcumsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::_logcumsumexp_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::logcumsumexp(Tensor self, int dim) -> Tensor + inline at::Tensor logcumsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::logcumsumexp::redispatch(dispatchKeySet, self, dim); + } + + // aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logcumsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim) { + return at::_ops::logcumsumexp_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logcumsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::logcumsumexp_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor + inline at::Tensor logcumsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) { + return at::_ops::logcumsumexp_dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logcumsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim) { + return at::_ops::logcumsumexp_dimname_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logcumsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & out) { + return at::_ops::logcumsumexp_dimname_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + inline at::Tensor logsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::logsumexp::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::logsumexp_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & logsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::logsumexp_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor + inline at::Tensor logsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim=false) { + return at::_ops::logsumexp_names::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false) { + return at::_ops::logsumexp_names_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) { + return at::_ops::logsumexp_names_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor + inline at::Tensor margin_ranking_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean) { + return at::_ops::margin_ranking_loss::redispatch(dispatchKeySet, input1, input2, target, margin, reduction); + } + + // aten::matmul(Tensor self, Tensor other) -> Tensor + inline at::Tensor matmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::matmul::redispatch(dispatchKeySet, self, other); + } + + // aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor) + inline ::std::tuple matmul_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array mask) { + return at::_ops::matmul_backward::redispatch(dispatchKeySet, grad, self, other, mask); + } + + // aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & matmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::matmul_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & matmul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::matmul_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::matrix_power(Tensor self, int n) -> Tensor + inline at::Tensor matrix_power(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n) { + return at::_ops::matrix_power::redispatch(dispatchKeySet, self, n); + } + + // aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) 
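+    // NOTE: illustrative only. matrix_power(self, n) computes the n-th matrix
+    // power; for n >= 1 it is equivalent to chaining n-1 matrix multiplies (the
+    // actual kernel is free to do better, e.g. by repeated squaring, and also
+    // handles n == 0 and negative exponents via the inverse). A naive sketch:
+    //
+    //   at::Tensor matrix_power_naive(const at::Tensor & self, int64_t n) {
+    //     TORCH_CHECK(n >= 1, "this sketch only covers positive exponents");
+    //     at::Tensor result = self;
+    //     for (int64_t i = 1; i < n; ++i) {
+    //       result = at::mm(result, self);
+    //     }
+    //     return result;
+    //   }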
+    inline at::Tensor & matrix_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t n) {
+        return at::_ops::matrix_power_out::redispatch(dispatchKeySet, self, n, out);
+    }
+
+    // aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & matrix_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
+        return at::_ops::matrix_power_out::redispatch(dispatchKeySet, self, n, out);
+    }
+
+    // aten::matrix_exp(Tensor self) -> Tensor
+    inline at::Tensor matrix_exp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::matrix_exp::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
+    inline at::Tensor matrix_exp_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad) {
+        return at::_ops::matrix_exp_backward::redispatch(dispatchKeySet, self, grad);
+    }
+
+    // aten::_aminmax(Tensor self) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _aminmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_aminmax::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _aminmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::_aminmax_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
+    inline ::std::tuple<at::Tensor,at::Tensor> aminmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+        return at::_ops::aminmax::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & min, at::Tensor & max, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+        return at::_ops::aminmax_out::redispatch(dispatchKeySet, self, dim, keepdim, min, max);
+    }
+
+    // aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
+        return at::_ops::aminmax_out::redispatch(dispatchKeySet, self, dim, keepdim, min, max);
+    }
+
+    // aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
+    inline at::Tensor _compute_linear_combination(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients) {
+        return at::_ops::_compute_linear_combination::redispatch(dispatchKeySet, input, coefficients);
+    }
+
+    // aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _compute_linear_combination_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & coefficients) {
+        return at::_ops::_compute_linear_combination_out::redispatch(dispatchKeySet, input, coefficients, out);
+    }
+
+    // aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _compute_linear_combination_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
+        return at::_ops::_compute_linear_combination_out::redispatch(dispatchKeySet, input, coefficients, out);
+    }
+
+    // aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::max_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::max_dim_max::redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
+    }
+
+    // aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
+        return at::_ops::max_dim_max::redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
+    }
+
+    // aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::max_names_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::max_names_dim_max::redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
+    }
+
+    // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
+        return at::_ops::max_names_dim_max::redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
+    }
+
+    // aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
+    inline at::Tensor value_selecting_reduction_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
+        return at::_ops::value_selecting_reduction_backward::redispatch(dispatchKeySet, grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
+    }
+
+    // aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
+    inline at::Tensor value_selecting_reduction_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
+        return at::_ops::value_selecting_reduction_backward::redispatch(dispatchKeySet, grad, dim, indices, sizes, keepdim);
+    }
+
+    // aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
+    inline at::Tensor amax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) {
+        return at::_ops::amax::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & amax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) {
+        return at::_ops::amax_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
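+    // NOTE: usage sketch for the out/outf naming convention used throughout this
+    // header: the `_out` overload takes `out` first so the trailing arguments keep
+    // their schema defaults, while `_outf` keeps the schema order with `out` last.
+    // Both forward to the same at::_ops entry. Assuming `ks` is the key set to
+    // redispatch with, the two calls below are equivalent:
+    //
+    //   at::Tensor self = at::rand({4, 5});
+    //   at::Tensor out = at::empty({}, self.options());
+    //   amax_out(ks, out, self, /*dim=*/{1});
+    //   amax_outf(ks, self, /*dim=*/{1}, /*keepdim=*/false, out);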
+ inline at::Tensor & amax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::amax_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + inline ::std::tuple max_pool1d_with_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool1d_with_indices::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor max_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool1d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor max_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor mkldnn_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::mkldnn_max_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor mkldnn_max_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::mkldnn_max_pool2d_backward::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::mkldnn_max_pool3d(Tensor self, 
int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor mkldnn_max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::mkldnn_max_pool3d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor mkldnn_max_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::mkldnn_max_pool3d_backward::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor quantized_max_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool1d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor quantized_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor quantized_max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool3d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + inline at::Tensor max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool3d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype=c10::nullopt) { + return at::_ops::mean::redispatch(dispatchKeySet, self, dtype); + } + + // aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor
+    inline at::Tensor mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean_dim::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::mean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean_names_dim::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean_names_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::mean_names_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor nanmean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::nanmean::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nanmean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::nanmean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nanmean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::nanmean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::median(Tensor self) -> Tensor
+    inline at::Tensor median(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::median::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> median(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::median_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> median_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::median_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+    }
+
+    // aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> median_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+        return at::_ops::median_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+    }
+
+    // aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> median(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::median_names_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> median_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::median_names_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+    }
+
+    // aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> median_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+        return at::_ops::median_names_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+    }
+
+    // aten::nanmedian(Tensor self) -> Tensor
+    inline at::Tensor nanmedian(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::nanmedian::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> nanmedian(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::nanmedian_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::nanmedian_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+    }
+
+    // aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+        return at::_ops::nanmedian_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+    }
+
+    // aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> nanmedian(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::nanmedian_names_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::nanmedian_names_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+    }
+
+    // aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+        return at::_ops::nanmedian_names_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+    }
+
+    // aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::min_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::min_dim_min::redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices);
+    }
+
+    // aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!)
indices) + inline ::std::tuple min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { + return at::_ops::min_dim_min::redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices); + } + + // aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::min_names_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::min_names_dim_min::redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices); + } + + // aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { + return at::_ops::min_names_dim_min::redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices); + } + + // aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + inline at::Tensor amin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) { + return at::_ops::amin::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & amin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) { + return at::_ops::amin_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & amin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::amin_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor _mps_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::_mps_convolution::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::_mps_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor _mps_convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::_mps_convolution::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups); + } + + // aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple mps_convolution_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward::redispatch(dispatchKeySet, self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); + } + + // aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple mps_convolution_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward::redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask); + } + + // aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor mkldnn_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::mkldnn_convolution::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor mkldnn_convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::mkldnn_convolution::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups); + } + + // aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple mkldnn_rnn_layer(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) { + return at::_ops::mkldnn_rnn_layer::redispatch(dispatchKeySet, input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train); + } + + // aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple mkldnn_rnn_layer_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) { + return at::_ops::mkldnn_rnn_layer_backward::redispatch(dispatchKeySet, input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace); + } + + // aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
+        return at::_ops::miopen_batch_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
+    }
+
+    // aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
+        return at::_ops::miopen_batch_norm_backward::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
+    }
+
+    // aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
+    inline at::Tensor miopen_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return at::_ops::miopen_convolution::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic);
+    }
+
+    // aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
+    inline at::Tensor miopen_convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
+        return at::_ops::miopen_convolution::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
+    }
+
+    // aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
+    inline at::Tensor miopen_convolution_transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return at::_ops::miopen_convolution_transpose::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic);
+    }
+
+    // aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor?
bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor + inline at::Tensor miopen_convolution_transpose_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_transpose::redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); + } + + // aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor + inline at::Tensor miopen_depthwise_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic); + } + + // aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor + inline at::Tensor miopen_depthwise_convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } + + // aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor miopen_convolution_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::miopen_convolution_relu::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor miopen_convolution_relu_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::miopen_convolution_relu::redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups); + } + + // aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor miopen_convolution_add_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::miopen_convolution_add_relu::redispatch(dispatchKeySet, self, weight, z, alpha, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor + inline at::Tensor miopen_convolution_add_relu_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::miopen_convolution_add_relu::redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups); + } + + // aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple miopen_rnn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state) { + return at::_ops::miopen_rnn::redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); + } + + // aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+        return at::_ops::miopen_rnn_backward::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
+    }
+
+    // aten::mm(Tensor self, Tensor mat2) -> Tensor
+    inline at::Tensor mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
+        return at::_ops::mm::redispatch(dispatchKeySet, self, mat2);
+    }
+
+    // aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
+        return at::_ops::mm_out::redispatch(dispatchKeySet, self, mat2, out);
+    }
+
+    // aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
+        return at::_ops::mm_out::redispatch(dispatchKeySet, self, mat2, out);
+    }
+
+    // aten::_int_mm(Tensor self, Tensor mat2) -> Tensor
+    inline at::Tensor _int_mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
+        return at::_ops::_int_mm::redispatch(dispatchKeySet, self, mat2);
+    }
+
+    // aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _int_mm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
+        return at::_ops::_int_mm_out::redispatch(dispatchKeySet, self, mat2, out);
+    }
+
+    // aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _int_mm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { + return at::_ops::_int_mm_out::redispatch(dispatchKeySet, self, mat2, out); + } + + // aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor + inline at::Tensor _convert_weight_to_int4pack(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t innerKTiles) { + return at::_ops::_convert_weight_to_int4pack::redispatch(dispatchKeySet, self, innerKTiles); + } + + // aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor + inline at::Tensor _weight_int4pack_mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) { + return at::_ops::_weight_int4pack_mm::redispatch(dispatchKeySet, self, mat2, qGroupSize, qScaleAndZeros); + } + + // aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor + inline at::Tensor _weight_int8pack_mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales) { + return at::_ops::_weight_int8pack_mm::redispatch(dispatchKeySet, self, mat2, scales); + } + + // aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor + inline at::Tensor _sparse_mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense) { + return at::_ops::_sparse_mm::redispatch(dispatchKeySet, sparse, dense); + } + + // aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor + inline at::Tensor _sparse_mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) { + return at::_ops::_sparse_mm_reduce::redispatch(dispatchKeySet, sparse, dense, reduce); + } + + // aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor + inline at::Tensor _sparse_sparse_matmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::_sparse_sparse_matmul::redispatch(dispatchKeySet, self, other); + } + + // aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=-1, bool keepdim=false) { + return at::_ops::mode::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple mode_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim=-1, bool keepdim=false) { + return at::_ops::mode_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + inline ::std::tuple mode_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::mode_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::mode_dimname::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple mode_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::mode_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple mode_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::mode_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::mul.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor mul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::mul_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & mul_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::mul__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::mul_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::mul_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::mul.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor mul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::mul_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & mul_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::mul__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor multiply(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::multiply_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
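// [Editorial note: illustrative sketch, not part of the upstream diff.]
// at::mode returns a (values, indices) pair, and at::multiply is the
// NumPy-style alias for at::mul. A small usage sketch of the public wrappers
// (assumes <ATen/ATen.h>; names are illustrative):
//
//   at::Tensor t = at::randint(0, 3, {4, 5});
//   auto [values, indices] = at::mode(t, /*dim=*/1, /*keepdim=*/false);
//
//   at::Tensor x = at::ones({4, 5});
//   at::Tensor y = at::mul(x, t);   // same result as at::multiply(x, t)
//   x.mul_(2.0);                    // in-place variant, as in mul_ above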
+ inline at::Tensor & multiply_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::multiply__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & multiply_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::multiply_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & multiply_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::multiply_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor multiply(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::multiply_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & multiply_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::multiply__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::mv(Tensor self, Tensor vec) -> Tensor + inline at::Tensor mv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec) { + return at::_ops::mv::redispatch(dispatchKeySet, self, vec); + } + + // aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & vec) { + return at::_ops::mv_out::redispatch(dispatchKeySet, self, vec, out); + } + + // aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) { + return at::_ops::mv_out::redispatch(dispatchKeySet, self, vec, out); + } + + // aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mvlgamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t p) { + return at::_ops::mvlgamma_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mvlgamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) { + return at::_ops::mvlgamma_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::mvlgamma(Tensor self, int p) -> Tensor + inline at::Tensor mvlgamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p) { + return at::_ops::mvlgamma::redispatch(dispatchKeySet, self, p); + } + + // aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!) 
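// [Editorial note: illustrative sketch, not part of the upstream diff.]
// at::mv is the matrix-vector product; like most ops in this header it comes
// in a functional form (mv), out= forms (mv_out / mv_outf), and, where
// defined, an in-place form with a trailing underscore. Sketch using the
// public wrappers (assumes <ATen/ATen.h>):
//
//   at::Tensor m = at::rand({3, 4});
//   at::Tensor v = at::rand({4});
//   at::Tensor r = at::mv(m, v);     // shape [3]
//
//   at::Tensor out = at::empty({3});
//   at::mv_out(out, m, v);           // same result, written into `out`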
+ inline at::Tensor & mvlgamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t p) { + return at::_ops::mvlgamma_::redispatch(dispatchKeySet, self, p); + } + + // aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor + inline at::Tensor narrow_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) { + return at::_ops::narrow_copy::redispatch(dispatchKeySet, self, dim, start, length); + } + + // aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor + inline at::Tensor narrow_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { + return at::_ops::narrow_copy::redispatch(dispatchKeySet, self, dim, start, length); + } + + // aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & narrow_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) { + return at::_ops::narrow_copy_out::redispatch(dispatchKeySet, self, dim, start, length, out); + } + + // aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & narrow_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out) { + return at::_ops::narrow_copy_out::redispatch(dispatchKeySet, self, dim, start, length, out); + } + + // aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & narrow_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { + return at::_ops::narrow_copy_out::redispatch(dispatchKeySet, self, dim, start, length, out); + } + + // aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) 
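// [Editorial note: illustrative sketch, not part of the upstream diff.]
// narrow_copy materializes the selected slice, whereas at::narrow (declared
// just below) returns a view of the same storage. The _symint overloads take
// symbolic sizes; eager C++ code normally passes plain integers. Sketch
// (assumes <ATen/ATen.h>):
//
//   at::Tensor t = at::arange(10);
//   at::Tensor copy = at::narrow_copy(t, /*dim=*/0, /*start=*/2, /*length=*/4);
//   at::Tensor view = at::narrow(t, /*dim=*/0, /*start=*/2, /*length=*/4);
//   view.add_(1);   // modifies t; `copy` is unaffected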
+ inline at::Tensor & narrow_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) { + return at::_ops::narrow_copy_out::redispatch(dispatchKeySet, self, dim, start, length, out); + } + + // aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) + inline at::Tensor narrow(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) { + return at::_ops::narrow::redispatch(dispatchKeySet, self, dim, start, length); + } + + // aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) + inline at::Tensor narrow_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { + return at::_ops::narrow::redispatch(dispatchKeySet, self, dim, start, length); + } + + // aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) + inline at::Tensor narrow(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length) { + return at::_ops::narrow_Tensor::redispatch(dispatchKeySet, self, dim, start, length); + } + + // aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) + inline at::Tensor narrow_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) { + return at::_ops::narrow_Tensor::redispatch(dispatchKeySet, self, dim, start, length); + } + + // aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) + inline ::std::tuple native_batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps) { + return at::_ops::native_batch_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps); + } + + // aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_batch_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps) { + return at::_ops::native_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd); + } + + // aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) 
save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_batch_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) { + return at::_ops::native_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd); + } + + // aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _native_batch_norm_legit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) { + return at::_ops::_native_batch_norm_legit::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps); + } + + // aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _native_batch_norm_legit_no_training(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) { + return at::_ops::_native_batch_norm_legit_no_training::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps); + } + + // aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!)) + inline ::std::tuple _native_batch_norm_legit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) { + return at::_ops::_native_batch_norm_legit_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd); + } + + // aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!)) + inline ::std::tuple _native_batch_norm_legit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) { + return at::_ops::_native_batch_norm_legit_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd); + } + + // aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? 
bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _native_batch_norm_legit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, bool training, double momentum, double eps) { + return at::_ops::_native_batch_norm_legit_no_stats::redispatch(dispatchKeySet, input, weight, bias, training, momentum, eps); + } + + // aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _native_batch_norm_legit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, bool training, double momentum, double eps) { + return at::_ops::_native_batch_norm_legit_no_stats_out::redispatch(dispatchKeySet, input, weight, bias, training, momentum, eps, out, save_mean, save_invstd); + } + + // aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _native_batch_norm_legit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) { + return at::_ops::_native_batch_norm_legit_no_stats_out::redispatch(dispatchKeySet, input, weight, bias, training, momentum, eps, out, save_mean, save_invstd); + } + + // aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor) + inline ::std::tuple batch_norm_stats(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps) { + return at::_ops::batch_norm_stats::redispatch(dispatchKeySet, input, eps); + } + + // aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor + inline at::Tensor batch_norm_elemt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) { + return at::_ops::batch_norm_elemt::redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps); + } + + // aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & batch_norm_elemt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) { + return at::_ops::batch_norm_elemt_out::redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps, out); + } + + // aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!) 
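// [Editorial note: illustrative sketch, not part of the upstream diff.]
// The native_batch_norm family above returns (output, save_mean, save_invstd);
// the saved statistics feed the backward op declared further below. Most
// callers use the composite at::batch_norm instead of these internals. Sketch
// (assumes <ATen/ATen.h>; NCHW input with affine parameters and running stats):
//
//   at::Tensor x = at::rand({8, 16, 4, 4});
//   at::Tensor weight = at::ones({16}), bias = at::zeros({16});
//   at::Tensor running_mean = at::zeros({16}), running_var = at::ones({16});
//
//   auto [out, save_mean, save_invstd] = at::native_batch_norm(
//       x, weight, bias, running_mean, running_var,
//       /*training=*/true, /*momentum=*/0.1, /*eps=*/1e-5);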
+ inline at::Tensor & batch_norm_elemt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) { + return at::_ops::batch_norm_elemt_out::redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps, out); + } + + // aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor) + inline ::std::tuple batch_norm_gather_stats(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count) { + return at::_ops::batch_norm_gather_stats::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count); + } + + // aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor) + inline ::std::tuple batch_norm_gather_stats_with_counts(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const at::Tensor & counts) { + return at::_ops::batch_norm_gather_stats_with_counts::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts); + } + + // aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple native_batch_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask) { + return at::_ops::native_batch_norm_backward::redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); + } + + // aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple batch_norm_backward_reduce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g) { + return at::_ops::batch_norm_backward_reduce::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); + } + + // aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? 
weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor + inline at::Tensor batch_norm_backward_elemt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) { + return at::_ops::batch_norm_backward_elemt::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); + } + + // aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor) + inline ::std::tuple batch_norm_update_stats(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum) { + return at::_ops::batch_norm_update_stats::redispatch(dispatchKeySet, input, running_mean, running_var, momentum); + } + + // aten::is_vulkan_available() -> bool + inline bool is_vulkan_available(c10::DispatchKeySet dispatchKeySet) { + return at::_ops::is_vulkan_available::redispatch(dispatchKeySet); + } + + // aten::_nnpack_available() -> bool + inline bool _nnpack_available(c10::DispatchKeySet dispatchKeySet) { + return at::_ops::_nnpack_available::redispatch(dispatchKeySet); + } + + // aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor + inline at::Tensor _nnpack_spatial_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) { + return at::_ops::_nnpack_spatial_convolution::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride)); + } + + // aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor + inline at::Tensor _nnpack_spatial_convolution_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) { + return at::_ops::_nnpack_spatial_convolution::redispatch(dispatchKeySet, input, weight, bias, padding, stride); + } + + // aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor ones(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::ones_names::redispatch(dispatchKeySet, size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor ones(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::ones_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory); + } + + // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor ones(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::ones::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor ones(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::ones::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor ones_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::ones::redispatch(dispatchKeySet, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor ones_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::ones::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory); + } + + // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ones_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) { + return at::_ops::ones_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ones_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::ones_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ones_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::ones_out::redispatch(dispatchKeySet, size, out); + } + + // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ones_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::ones_out::redispatch(dispatchKeySet, size, out); + } + + // aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor ones_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::ones_like::redispatch(dispatchKeySet, self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor ones_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::ones_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format); + } + + // aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor + inline at::Tensor pairwise_distance(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false) { + return at::_ops::pairwise_distance::redispatch(dispatchKeySet, x1, x2, p, eps, keepdim); + } + + // aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor + inline at::Tensor cdist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p=2, c10::optional compute_mode=c10::nullopt) { + return at::_ops::cdist::redispatch(dispatchKeySet, x1, x2, p, compute_mode); + } + + // aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor + inline at::Tensor _euclidean_dist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2) { + return at::_ops::_euclidean_dist::redispatch(dispatchKeySet, x1, x2); + } + + // aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor + inline at::Tensor _cdist_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional compute_mode) { + return at::_ops::_cdist_forward::redispatch(dispatchKeySet, x1, x2, p, compute_mode); + } + + // aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor + inline at::Tensor _cdist_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) { + return at::_ops::_cdist_backward::redispatch(dispatchKeySet, grad, x1, x2, p, cdist); + } + + // aten::pdist(Tensor self, float p=2) -> Tensor + inline at::Tensor pdist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p=2) { + return at::_ops::pdist::redispatch(dispatchKeySet, self, p); + } + + // aten::_pdist_forward(Tensor self, float p=2) -> Tensor + inline at::Tensor _pdist_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p=2) { + return at::_ops::_pdist_forward::redispatch(dispatchKeySet, self, p); + } + + // aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor + inline at::Tensor _pdist_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) { + return at::_ops::_pdist_backward::redispatch(dispatchKeySet, grad, self, p, pdist); + } + + // aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor + inline at::Tensor cosine_similarity(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, int64_t dim=1, double eps=1e-08) { + return at::_ops::cosine_similarity::redispatch(dispatchKeySet, x1, x2, dim, eps); + } + + // aten::permute(Tensor(a) self, int[] dims) -> Tensor(a) + inline at::Tensor permute(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::permute::redispatch(dispatchKeySet, self, dims); + } + + // aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) + inline at::Tensor 
movedim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) { + return at::_ops::movedim_intlist::redispatch(dispatchKeySet, self, source, destination); + } + + // aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) + inline at::Tensor movedim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination) { + return at::_ops::movedim_int::redispatch(dispatchKeySet, self, source, destination); + } + + // aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) + inline at::Tensor moveaxis(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) { + return at::_ops::moveaxis_intlist::redispatch(dispatchKeySet, self, source, destination); + } + + // aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) + inline at::Tensor moveaxis(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination) { + return at::_ops::moveaxis_int::redispatch(dispatchKeySet, self, source, destination); + } + + // aten::numpy_T(Tensor(a) self) -> Tensor(a) + inline at::Tensor numpy_T(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::numpy_T::redispatch(dispatchKeySet, self); + } + + // aten::matrix_H(Tensor(a) self) -> Tensor(a) + inline at::Tensor matrix_H(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::matrix_H::redispatch(dispatchKeySet, self); + } + + // aten::mT(Tensor(a) self) -> Tensor(a) + inline at::Tensor mT(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::mT::redispatch(dispatchKeySet, self); + } + + // aten::mH(Tensor(a) self) -> Tensor(a) + inline at::Tensor mH(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::mH::redispatch(dispatchKeySet, self); + } + + // aten::adjoint(Tensor(a) self) -> Tensor(a) + inline at::Tensor adjoint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::adjoint::redispatch(dispatchKeySet, self); + } + + // aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor + inline at::Tensor pixel_shuffle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor) { + return at::_ops::pixel_shuffle::redispatch(dispatchKeySet, self, upscale_factor); + } + + // aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor + inline at::Tensor pixel_unshuffle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor) { + return at::_ops::pixel_unshuffle::redispatch(dispatchKeySet, self, downscale_factor); + } + + // aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor + inline at::Tensor channel_shuffle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups) { + return at::_ops::channel_shuffle::redispatch(dispatchKeySet, self, groups); + } + + // aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor + inline at::Tensor channel_shuffle_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups) { + return at::_ops::channel_shuffle::redispatch(dispatchKeySet, self, groups); + } + + // aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor + inline at::Tensor native_channel_shuffle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups) { + return at::_ops::native_channel_shuffle::redispatch(dispatchKeySet, self, 
groups); + } + + // aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor + inline at::Tensor native_channel_shuffle_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups) { + return at::_ops::native_channel_shuffle::redispatch(dispatchKeySet, self, groups); + } + + // aten::is_pinned(Tensor self, Device? device=None) -> bool + inline bool is_pinned(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional device=c10::nullopt) { + return at::_ops::is_pinned::redispatch(dispatchKeySet, self, device); + } + + // aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a) + inline at::Tensor pin_memory(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional device=c10::nullopt) { + return at::_ops::pin_memory::redispatch(dispatchKeySet, self, device); + } + + // aten::_pin_memory(Tensor self, Device? device=None) -> Tensor + inline at::Tensor _pin_memory(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional device=c10::nullopt) { + return at::_ops::_pin_memory::redispatch(dispatchKeySet, self, device); + } + + // aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor + inline at::Tensor pinverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond=1e-15) { + return at::_ops::pinverse::redispatch(dispatchKeySet, self, rcond); + } + + // aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor + inline at::Tensor poisson_nll_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) { + return at::_ops::poisson_nll_loss::redispatch(dispatchKeySet, input, target, log_input, full, eps, reduction); + } + + // aten::rad2deg(Tensor self) -> Tensor + inline at::Tensor rad2deg(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::rad2deg::redispatch(dispatchKeySet, self); + } + + // aten::rad2deg_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & rad2deg_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::rad2deg_::redispatch(dispatchKeySet, self); + } + + // aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rad2deg_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::rad2deg_out::redispatch(dispatchKeySet, self, out); + } + + // aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rad2deg_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::rad2deg_out::redispatch(dispatchKeySet, self, out); + } + + // aten::deg2rad(Tensor self) -> Tensor + inline at::Tensor deg2rad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::deg2rad::redispatch(dispatchKeySet, self); + } + + // aten::deg2rad_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & deg2rad_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::deg2rad_::redispatch(dispatchKeySet, self); + } + + // aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & deg2rad_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::deg2rad_out::redispatch(dispatchKeySet, self, out); + } + + // aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
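// [Editorial note: illustrative sketch, not part of the upstream diff.]
// rad2deg/deg2rad illustrate the naming scheme used throughout this header:
// `op` is functional, `op_` mutates self, and `op_out` / `op_outf` are the
// same out= overload with the `out` argument leading or trailing. Sketch with
// the public wrappers (assumes <ATen/ATen.h>):
//
//   at::Tensor rad = at::full({4}, 3.14159265);  // pi radians
//   at::Tensor deg = at::rad2deg(rad);           // 180 degrees each
//
//   at::Tensor out = at::empty_like(rad);
//   at::rad2deg_out(out, rad);                   // out= overload
//   deg.deg2rad_();                              // in-place, back to radians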
+ inline at::Tensor & deg2rad_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::deg2rad_out::redispatch(dispatchKeySet, self, out); + } + + // aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor scalar_tensor(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::TensorOptions options={}) { + return at::_ops::scalar_tensor::redispatch(dispatchKeySet, s, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor scalar_tensor(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::scalar_tensor::redispatch(dispatchKeySet, s, dtype, layout, device, pin_memory); + } + + // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_names::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); + } + + // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::redispatch(dispatchKeySet, size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory); + } + + // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator_with_names::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); + } + + // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::redispatch(dispatchKeySet, size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator_with_names::redispatch(dispatchKeySet, size, generator, names, dtype, layout, device, pin_memory); + } + + // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor rand_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::redispatch(dispatchKeySet, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory); + } + + // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } + + // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::redispatch(dispatchKeySet, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor rand_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator::redispatch(dispatchKeySet, size, generator, dtype, layout, device, pin_memory); + } + + // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) { + return at::_ops::rand_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
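// [Editorial note: illustrative sketch, not part of the upstream diff.]
// Each factory above is emitted twice, once taking at::TensorOptions and once
// taking the unpacked dtype/layout/device/pin_memory optionals, plus _symint
// variants for symbolic sizes. Public-API sketch (assumes <ATen/ATen.h>):
//
//   at::Tensor a = at::rand({2, 3});                 // float32 on CPU
//   at::Tensor b = at::rand({2, 3}, at::TensorOptions()
//                                       .dtype(at::kDouble)
//                                       .device(at::kCPU));
//
// The generator overloads additionally accept an optional at::Generator for
// reproducible sampling.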
+ inline at::Tensor & rand_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::rand_out::redispatch(dispatchKeySet, size, out); + } + + // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::redispatch(dispatchKeySet, size, out); + } + + // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional generator) { + return at::_ops::rand_generator_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::rand_generator_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator) { + return at::_ops::rand_generator_out::redispatch(dispatchKeySet, size, generator, out); + } + + // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::rand_generator_out::redispatch(dispatchKeySet, size, generator, out); + } + + // aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor rand_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::rand_like::redispatch(dispatchKeySet, self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor rand_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::rand_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format); + } + + // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) { + return at::_ops::randint::redispatch(dispatchKeySet, high, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randint(SymInt high, SymInt[] size, *, ScalarType? 
dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randint::redispatch(dispatchKeySet, high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) { + return at::_ops::randint::redispatch(dispatchKeySet, high, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randint::redispatch(dispatchKeySet, high, size, dtype, layout, device, pin_memory); + } + + // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional generator, at::TensorOptions options=at::kLong) { + return at::_ops::randint_generator::redispatch(dispatchKeySet, high, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randint_generator::redispatch(dispatchKeySet, high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } + + // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options=at::kLong) { + return at::_ops::randint_generator::redispatch(dispatchKeySet, high, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor randint_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randint_generator::redispatch(dispatchKeySet, high, size, generator, dtype, layout, device, pin_memory); + } + + // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) { + return at::_ops::randint_low::redispatch(dispatchKeySet, low, high, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randint_low::redispatch(dispatchKeySet, low, high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) { + return at::_ops::randint_low::redispatch(dispatchKeySet, low, high, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randint_low::redispatch(dispatchKeySet, low, high, size, dtype, layout, device, pin_memory); + } + + // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional generator, at::TensorOptions options=at::kLong) { + return at::_ops::randint_low_generator::redispatch(dispatchKeySet, low, high, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randint_low_generator::redispatch(dispatchKeySet, low, high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } + + // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options=at::kLong) { + return at::_ops::randint_low_generator::redispatch(dispatchKeySet, low, high, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randint_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randint_low_generator::redispatch(dispatchKeySet, low, high, size, generator, dtype, layout, device, pin_memory); + } + + // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t high, at::IntArrayRef size) { + return at::_ops::randint_out::redispatch(dispatchKeySet, high, c10::fromIntArrayRefSlow(size), out); + } + + // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_outf(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::randint_out::redispatch(dispatchKeySet, high, c10::fromIntArrayRefSlow(size), out); + } + + // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size) { + return at::_ops::randint_out::redispatch(dispatchKeySet, high, size, out); + } + + // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::randint_out::redispatch(dispatchKeySet, high, size, out); + } + + // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t high, at::IntArrayRef size, c10::optional generator) { + return at::_ops::randint_generator_out::redispatch(dispatchKeySet, high, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & randint_outf(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randint_generator_out::redispatch(dispatchKeySet, high, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator) { + return at::_ops::randint_generator_out::redispatch(dispatchKeySet, high, size, generator, out); + } + + // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randint_generator_out::redispatch(dispatchKeySet, high, size, generator, out); + } + + // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size) { + return at::_ops::randint_low_out::redispatch(dispatchKeySet, low, high, c10::fromIntArrayRefSlow(size), out); + } + + // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_outf(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::randint_low_out::redispatch(dispatchKeySet, low, high, c10::fromIntArrayRefSlow(size), out); + } + + // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size) { + return at::_ops::randint_low_out::redispatch(dispatchKeySet, low, high, size, out); + } + + // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::randint_low_out::redispatch(dispatchKeySet, low, high, size, out); + } + + // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, c10::optional generator) { + return at::_ops::randint_low_generator_out::redispatch(dispatchKeySet, low, high, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_outf(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randint_low_generator_out::redispatch(dispatchKeySet, low, high, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
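Each factory overload above is generated in two forms that reach the same operator: one takes an at::TensorOptions bundle and unpacks it on the spot (c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()), the other takes the four properties as individual optionals. A minimal sketch of the equivalence, assuming these wrappers are exposed under the at::redispatch namespace of the generated RedispatchFunctions.h and that ks is a caller-supplied DispatchKeySet:

#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>  // assumed location of the wrappers above

at::Tensor randint_two_spellings(c10::DispatchKeySet ks) {
  // Bundled form: dtype/layout/device/pin_memory travel inside TensorOptions.
  at::Tensor a = at::redispatch::randint(
      ks, /*high=*/10, {2, 3}, at::TensorOptions().dtype(at::kLong).device(at::kCPU));
  // Unpacked form: the same four properties passed as separate optionals.
  at::Tensor b = at::redispatch::randint(
      ks, /*high=*/10, {2, 3}, at::kLong, c10::nullopt, at::Device(at::kCPU), c10::nullopt);
  return a + b;
}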
+ inline at::Tensor & randint_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator) { + return at::_ops::randint_low_generator_out::redispatch(dispatchKeySet, low, high, size, generator, out); + } + + // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randint_low_generator_out::redispatch(dispatchKeySet, low, high, size, generator, out); + } + + // aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor randint_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::randint_like::redispatch(dispatchKeySet, self, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor randint_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::randint_like::redispatch(dispatchKeySet, self, high, dtype, layout, device, pin_memory, memory_format); + } + + // aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor randint_like_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::randint_like::redispatch(dispatchKeySet, self, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor randint_like_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::randint_like::redispatch(dispatchKeySet, self, high, dtype, layout, device, pin_memory, memory_format); + } + + // aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor randint_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::randint_like_low_dtype::redispatch(dispatchKeySet, self, low, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor randint_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::randint_like_low_dtype::redispatch(dispatchKeySet, self, low, high, dtype, layout, device, pin_memory, memory_format); + } + + // aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor randint_like_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::randint_like_low_dtype::redispatch(dispatchKeySet, self, low, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor randint_like_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::randint_like_low_dtype::redispatch(dispatchKeySet, self, low, high, dtype, layout, device, pin_memory, memory_format); + } + + // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor randn_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::redispatch(dispatchKeySet, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory); + } + + // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } + + // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::redispatch(dispatchKeySet, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator::redispatch(dispatchKeySet, size, generator, dtype, layout, device, pin_memory); + } + + // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_names::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); + } + + // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::redispatch(dispatchKeySet, size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory); + } + + // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator_with_names::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); + } + + // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randn_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::redispatch(dispatchKeySet, size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor randn_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator_with_names::redispatch(dispatchKeySet, size, generator, names, dtype, layout, device, pin_memory); + } + + // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) { + return at::_ops::randn_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::randn_out::redispatch(dispatchKeySet, size, out); + } + + // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::redispatch(dispatchKeySet, size, out); + } + + // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional generator) { + return at::_ops::randn_generator_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator) { + return at::_ops::randn_generator_out::redispatch(dispatchKeySet, size, generator, out); + } + + // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::redispatch(dispatchKeySet, size, generator, out); + } + + // aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor randn_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::randn_like::redispatch(dispatchKeySet, self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor randn_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::randn_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format); + } + + // aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randperm(c10::DispatchKeySet dispatchKeySet, int64_t n, at::TensorOptions options=at::kLong) { + return at::_ops::randperm::redispatch(dispatchKeySet, n, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randperm(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::randperm::redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory); + } + + // aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randperm_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::TensorOptions options=at::kLong) { + return at::_ops::randperm::redispatch(dispatchKeySet, n, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randperm_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::randperm::redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory); + } + + // aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randperm(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) { + return at::_ops::randperm_generator::redispatch(dispatchKeySet, n, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor + inline at::Tensor randperm(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::randperm_generator::redispatch(dispatchKeySet, n, generator, dtype, layout, device, pin_memory); + } + + // aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randperm_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) { + return at::_ops::randperm_generator::redispatch(dispatchKeySet, n, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor randperm_symint(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::randperm_generator::redispatch(dispatchKeySet, n, generator, dtype, layout, device, pin_memory); + } + + // aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randperm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n) { + return at::_ops::randperm_out::redispatch(dispatchKeySet, n, out); + } + + // aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randperm_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, at::Tensor & out) { + return at::_ops::randperm_out::redispatch(dispatchKeySet, n, out); + } + + // aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randperm_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymInt n) { + return at::_ops::randperm_out::redispatch(dispatchKeySet, n, out); + } + + // aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randperm_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out) { + return at::_ops::randperm_out::redispatch(dispatchKeySet, n, out); + } + + // aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randperm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, c10::optional<at::Generator> generator) { + return at::_ops::randperm_generator_out::redispatch(dispatchKeySet, n, generator, out); + } + + // aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randperm_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::randperm_generator_out::redispatch(dispatchKeySet, n, generator, out); + } + + // aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randperm_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymInt n, c10::optional<at::Generator> generator) { + return at::_ops::randperm_generator_out::redispatch(dispatchKeySet, n, generator, out); + } + + // aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randperm_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::randperm_generator_out::redispatch(dispatchKeySet, n, generator, out); + } + + // aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step=1, at::TensorOptions options={}) { + return at::_ops::range_step::redispatch(dispatchKeySet, start, end, step, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::range_step::redispatch(dispatchKeySet, start, end, step, dtype, layout, device, pin_memory); + } + + // aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={}) { + return at::_ops::range::redispatch(dispatchKeySet, start, end, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::range::redispatch(dispatchKeySet, start, end, dtype, layout, device, pin_memory); + } + + // aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & range_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end) { + return at::_ops::range_out_::redispatch(dispatchKeySet, start, end, out); + } + + // aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & range_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::Tensor & out) { + return at::_ops::range_out_::redispatch(dispatchKeySet, start, end, out); + } + + // aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & range_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) { + return at::_ops::range_out::redispatch(dispatchKeySet, start, end, step, out); + } + + // aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & range_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) { + return at::_ops::range_out::redispatch(dispatchKeySet, start, end, step, out); + } + + // aten::ravel(Tensor(a) self) -> Tensor(a) + inline at::Tensor ravel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::ravel::redispatch(dispatchKeySet, self); + } + + // aten::reciprocal(Tensor self) -> Tensor + inline at::Tensor reciprocal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::reciprocal::redispatch(dispatchKeySet, self); + } + + // aten::reciprocal_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & reciprocal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::reciprocal_::redispatch(dispatchKeySet, self); + } + + // aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reciprocal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::reciprocal_out::redispatch(dispatchKeySet, self, out); + } + + // aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reciprocal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::reciprocal_out::redispatch(dispatchKeySet, self, out); + } + + // aten::neg(Tensor self) -> Tensor + inline at::Tensor neg(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::neg::redispatch(dispatchKeySet, self); + } + + // aten::neg_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & neg_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::neg_::redispatch(dispatchKeySet, self); + } + + // aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & neg_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::neg_out::redispatch(dispatchKeySet, self, out); + } + + // aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & neg_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::neg_out::redispatch(dispatchKeySet, self, out); + } + + // aten::negative(Tensor self) -> Tensor + inline at::Tensor negative(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::negative::redispatch(dispatchKeySet, self); + } + + // aten::negative_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & negative_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::negative_::redispatch(dispatchKeySet, self); + } + + // aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & negative_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::negative_out::redispatch(dispatchKeySet, self, out); + } + + // aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & negative_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::negative_out::redispatch(dispatchKeySet, self, out); + } + + // aten::repeat(Tensor self, SymInt[] repeats) -> Tensor + inline at::Tensor repeat(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef repeats) { + return at::_ops::repeat::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(repeats)); + } + + // aten::repeat(Tensor self, SymInt[] repeats) -> Tensor + inline at::Tensor repeat_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats) { + return at::_ops::repeat::redispatch(dispatchKeySet, self, repeats); + } + + // aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor + inline at::Tensor repeat_interleave(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional<int64_t> output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor::redispatch(dispatchKeySet, repeats, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); + } + + // aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor + inline at::Tensor repeat_interleave_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional<c10::SymInt> output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor::redispatch(dispatchKeySet, repeats, output_size); + } + + // aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor + inline at::Tensor repeat_interleave(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_Tensor::redispatch(dispatchKeySet, self, repeats, dim, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); + } + + // aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor + inline at::Tensor repeat_interleave_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<c10::SymInt> output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_Tensor::redispatch(dispatchKeySet, self, repeats, dim, output_size); + } + + // aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor + inline at::Tensor repeat_interleave(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_int::redispatch(dispatchKeySet, self, repeats, dim, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); + } + + // aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt?
output_size=None) -> Tensor + inline at::Tensor repeat_interleave_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_int::redispatch(dispatchKeySet, self, repeats, dim, output_size); + } + + // aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) + inline at::Tensor reshape(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape) { + return at::_ops::reshape::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(shape)); + } + + // aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) + inline at::Tensor reshape_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shape) { + return at::_ops::reshape::redispatch(dispatchKeySet, self, shape); + } + + // aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor + inline at::Tensor _reshape_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_reshape_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size)); + } + + // aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor + inline at::Tensor _reshape_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::_reshape_copy::redispatch(dispatchKeySet, self, size); + } + + // aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a) + inline at::Tensor _reshape_alias(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) { + return at::_ops::_reshape_alias::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); + } + + // aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a) + inline at::Tensor _reshape_alias_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + return at::_ops::_reshape_alias::redispatch(dispatchKeySet, self, size, stride); + } + + // aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor + inline at::Tensor _mkldnn_reshape(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape) { + return at::_ops::_mkldnn_reshape::redispatch(dispatchKeySet, self, shape); + } + + // aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a) + inline at::Tensor reshape_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::reshape_as::redispatch(dispatchKeySet, self, other); + } + + // aten::round(Tensor self) -> Tensor + inline at::Tensor round(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::round::redispatch(dispatchKeySet, self); + } + + // aten::round_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & round_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::round_::redispatch(dispatchKeySet, self); + } + + // aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & round_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::round_out::redispatch(dispatchKeySet, self, out); + } + + // aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
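Ops whose schemas take SymInt sizes (repeat, reshape, _reshape_copy and _reshape_alias above) follow one pattern throughout this header: an at::IntArrayRef overload that widens concrete sizes through c10::fromIntArrayRefSlow, and a _symint overload that forwards a c10::SymIntArrayRef untouched. A short illustrative sketch under the same at::redispatch namespace assumption; it is not part of the generated file:

#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>  // assumed location of the wrappers above
#include <vector>

at::Tensor reshape_two_ways(const at::Tensor& t, c10::DispatchKeySet ks) {
  // Concrete sizes: converted to SymInt by the IntArrayRef wrapper.
  at::Tensor a = at::redispatch::reshape(ks, t, {2, -1});
  // Symbolic sizes: handed to at::_ops::reshape::redispatch as-is.
  std::vector<c10::SymInt> sym_shape{c10::SymInt(2), c10::SymInt(-1)};
  at::Tensor b = at::redispatch::reshape_symint(ks, t, sym_shape);
  return a.equal(b) ? a : b;  // both calls resolve to the same operator
}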
+ inline at::Tensor & round_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::round_out::redispatch(dispatchKeySet, self, out); + } + + // aten::round.decimals(Tensor self, *, int decimals) -> Tensor + inline at::Tensor round(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals) { + return at::_ops::round_decimals::redispatch(dispatchKeySet, self, decimals); + } + + // aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!) + inline at::Tensor & round_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t decimals) { + return at::_ops::round__decimals::redispatch(dispatchKeySet, self, decimals); + } + + // aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & round_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t decimals) { + return at::_ops::round_decimals_out::redispatch(dispatchKeySet, self, decimals, out); + } + + // aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & round_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) { + return at::_ops::round_decimals_out::redispatch(dispatchKeySet, self, decimals, out); + } + + // aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor + inline at::Tensor rrelu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt) { + return at::_ops::rrelu::redispatch(dispatchKeySet, self, lower, upper, training, generator); + } + + // aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & rrelu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt) { + return at::_ops::rrelu_::redispatch(dispatchKeySet, self, lower, upper, training, generator); + } + + // aten::relu(Tensor self) -> Tensor + inline at::Tensor relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::relu::redispatch(dispatchKeySet, self); + } + + // aten::relu_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::relu_::redispatch(dispatchKeySet, self); + } + + // aten::relu6(Tensor self) -> Tensor + inline at::Tensor relu6(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::relu6::redispatch(dispatchKeySet, self); + } + + // aten::relu6_(Tensor(a!) self) -> Tensor(a!) 
+ inline at::Tensor & relu6_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::relu6_::redispatch(dispatchKeySet, self); + } + + // aten::prelu(Tensor self, Tensor weight) -> Tensor + inline at::Tensor prelu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight) { + return at::_ops::prelu::redispatch(dispatchKeySet, self, weight); + } + + // aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor + inline at::Tensor _prelu_kernel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight) { + return at::_ops::_prelu_kernel::redispatch(dispatchKeySet, self, weight); + } + + // aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) + inline ::std::tuple _prelu_kernel_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) { + return at::_ops::_prelu_kernel_backward::redispatch(dispatchKeySet, grad_output, self, weight); + } + + // aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gelu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_out::redispatch(dispatchKeySet, self, approximate, out); + } + + // aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gelu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate, at::Tensor & out) { + return at::_ops::gelu_out::redispatch(dispatchKeySet, self, approximate, out); + } + + // aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!) + inline at::Tensor & gelu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_::redispatch(dispatchKeySet, self, approximate); + } + + // aten::gelu(Tensor self, *, str approximate='none') -> Tensor + inline at::Tensor gelu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu::redispatch(dispatchKeySet, self, approximate); + } + + // aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & gelu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, approximate, grad_input); + } + + // aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & gelu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) { + return at::_ops::gelu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, approximate, grad_input); + } + + // aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor + inline at::Tensor gelu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_backward::redispatch(dispatchKeySet, grad_output, self, approximate); + } + + // aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor + inline at::Tensor infinitely_differentiable_gelu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self) { + return at::_ops::infinitely_differentiable_gelu_backward::redispatch(dispatchKeySet, grad, self); + } + + // aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardshrink_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::hardshrink_out::redispatch(dispatchKeySet, self, lambd, out); + } + + // aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardshrink_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) { + return at::_ops::hardshrink_out::redispatch(dispatchKeySet, self, lambd, out); + } + + // aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor + inline at::Tensor hardshrink(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::hardshrink::redispatch(dispatchKeySet, self, lambd); + } + + // aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & hardshrink_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::hardshrink_backward_grad_input::redispatch(dispatchKeySet, grad_out, self, lambd, grad_input); + } + + // aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & hardshrink_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) { + return at::_ops::hardshrink_backward_grad_input::redispatch(dispatchKeySet, grad_out, self, lambd, grad_input); + } + + // aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor + inline at::Tensor hardshrink_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::hardshrink_backward::redispatch(dispatchKeySet, grad_out, self, lambd); + } + + // aten::rsqrt(Tensor self) -> Tensor + inline at::Tensor rsqrt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::rsqrt::redispatch(dispatchKeySet, self); + } + + // aten::rsqrt_(Tensor(a!) self) -> Tensor(a!) 
+ inline at::Tensor & rsqrt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::rsqrt_::redispatch(dispatchKeySet, self); + } + + // aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsqrt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::rsqrt_out::redispatch(dispatchKeySet, self, out); + } + + // aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsqrt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::rsqrt_out::redispatch(dispatchKeySet, self, out); + } + + // aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) + inline at::Tensor select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, int64_t index) { + return at::_ops::select_Dimname::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) + inline at::Tensor select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t index) { + return at::_ops::select_int::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) + inline at::Tensor select_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index) { + return at::_ops::select_int::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor + inline at::Tensor select_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) { + return at::_ops::select_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index); + } + + // aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor + inline at::Tensor select_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) { + return at::_ops::select_backward::redispatch(dispatchKeySet, grad_output, input_sizes, dim, index); + } + + // aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor + inline at::Tensor _nested_select_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, int64_t index) { + return at::_ops::_nested_select_backward::redispatch(dispatchKeySet, grad_output, self, dim, index); + } + + // aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor + inline at::Tensor _nested_select_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) { + return at::_ops::_nested_select_backward::redispatch(dispatchKeySet, grad_output, self, dim, index); + } + + // aten::selu(Tensor self) -> Tensor + inline at::Tensor selu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::selu::redispatch(dispatchKeySet, self); + } + + // aten::selu_(Tensor(a!) self) -> Tensor(a!) 
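The out= variants above always come in two spellings that forward to the same _ops entry: *_out takes the destination tensor first, while *_outf keeps the schema order with the destination last. A brief sketch, under the same at::redispatch namespace assumption:

#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>  // assumed location of the wrappers above

void rsqrt_into(const at::Tensor& self, at::Tensor& out, c10::DispatchKeySet ks) {
  at::redispatch::rsqrt_out(ks, out, self);   // destination-first spelling
  at::redispatch::rsqrt_outf(ks, self, out);  // schema-order spelling, same kernel
}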
+ inline at::Tensor & selu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::selu_::redispatch(dispatchKeySet, self); + } + + // aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor + inline at::Tensor celu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha=1.0) { + return at::_ops::celu::redispatch(dispatchKeySet, self, alpha); + } + + // aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) + inline at::Tensor & celu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha=1.0) { + return at::_ops::celu_::redispatch(dispatchKeySet, self, alpha); + } + + // aten::silu(Tensor self) -> Tensor + inline at::Tensor silu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::silu::redispatch(dispatchKeySet, self); + } + + // aten::silu_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & silu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::silu_::redispatch(dispatchKeySet, self); + } + + // aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & silu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::silu_out::redispatch(dispatchKeySet, self, out); + } + + // aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & silu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::silu_out::redispatch(dispatchKeySet, self, out); + } + + // aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & silu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::silu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input); + } + + // aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & silu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { + return at::_ops::silu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input); + } + + // aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor silu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::silu_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::mish(Tensor self) -> Tensor + inline at::Tensor mish(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::mish::redispatch(dispatchKeySet, self); + } + + // aten::mish_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & mish_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::mish_::redispatch(dispatchKeySet, self); + } + + // aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mish_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::mish_out::redispatch(dispatchKeySet, self, out); + } + + // aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & mish_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::mish_out::redispatch(dispatchKeySet, self, out); + } + + // aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor mish_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::mish_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::sigmoid(Tensor self) -> Tensor + inline at::Tensor sigmoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sigmoid::redispatch(dispatchKeySet, self); + } + + // aten::sigmoid_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sigmoid_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sigmoid_::redispatch(dispatchKeySet, self); + } + + // aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sigmoid_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sigmoid_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::logit(Tensor self, float? eps=None) -> Tensor + inline at::Tensor logit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional eps=c10::nullopt) { + return at::_ops::logit::redispatch(dispatchKeySet, self, eps); + } + + // aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) + inline at::Tensor & logit_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional eps=c10::nullopt) { + return at::_ops::logit_::redispatch(dispatchKeySet, self, eps); + } + + // aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional eps=c10::nullopt) { + return at::_ops::logit_out::redispatch(dispatchKeySet, self, eps, out); + } + + // aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional eps, at::Tensor & out) { + return at::_ops::logit_out::redispatch(dispatchKeySet, self, eps, out); + } + + // aten::sin(Tensor self) -> Tensor + inline at::Tensor sin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sin::redispatch(dispatchKeySet, self); + } + + // aten::sin_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sin_::redispatch(dispatchKeySet, self); + } + + // aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & sin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sinc(Tensor self) -> Tensor + inline at::Tensor sinc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sinc::redispatch(dispatchKeySet, self); + } + + // aten::sinc_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sinc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sinc_::redispatch(dispatchKeySet, self); + } + + // aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sinc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sinc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sinc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sinc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sinh(Tensor self) -> Tensor + inline at::Tensor sinh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sinh::redispatch(dispatchKeySet, self); + } + + // aten::sinh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sinh_::redispatch(dispatchKeySet, self); + } + + // aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sinh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sinh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::detach(Tensor(a) self) -> Tensor(a) + inline at::Tensor detach(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::detach::redispatch(dispatchKeySet, self); + } + + // aten::detach_(Tensor(a!) self) -> Tensor(a!) 
+ inline at::Tensor & detach_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+     return at::_ops::detach_::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::size.int(Tensor self, int dim) -> int
+ inline int64_t __dispatch_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+     return at::_ops::size_int::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::size.Dimname(Tensor self, Dimname dim) -> int
+ inline int64_t size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
+     return at::_ops::size_Dimname::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::sym_size.int(Tensor self, int dim) -> SymInt
+ inline c10::SymInt __dispatch_sym_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+     return at::_ops::sym_size_int::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::sym_numel(Tensor self) -> SymInt
+ inline c10::SymInt __dispatch_sym_numel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+     return at::_ops::sym_numel::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::sym_storage_offset(Tensor self) -> SymInt
+ inline c10::SymInt __dispatch_sym_storage_offset(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+     return at::_ops::sym_storage_offset::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+ inline at::Tensor slice(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+     return at::_ops::slice_Tensor::redispatch(dispatchKeySet, self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
+ }
+
+ // aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+ inline at::Tensor slice_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+     return at::_ops::slice_Tensor::redispatch(dispatchKeySet, self, dim, start, end, step);
+ }
+
+ // aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
+ inline at::Tensor slice_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
+     return at::_ops::slice_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step);
+ }
+
+ // aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
+ inline at::Tensor slice_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
+     return at::_ops::slice_backward::redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step);
+ }
+
+ // aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+ inline at::Tensor slice_inverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+     return at::_ops::slice_inverse::redispatch(dispatchKeySet, self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
+ }
+
+ // aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+ inline at::Tensor slice_inverse_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+     return at::_ops::slice_inverse::redispatch(dispatchKeySet, self, src, dim, start, end, step);
+ }
+
+ // aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+ inline at::Tensor slice_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+     return at::_ops::slice_scatter::redispatch(dispatchKeySet, self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
+ }
+
+ // aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+ inline at::Tensor slice_scatter_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+     return at::_ops::slice_scatter::redispatch(dispatchKeySet, self, src, dim, start, end, step);
+ }
+
+ // aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
+ inline at::Tensor select_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
+     return at::_ops::select_scatter::redispatch(dispatchKeySet, self, src, dim, index);
+ }
+
+ // aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
+ inline at::Tensor select_scatter_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
+     return at::_ops::select_scatter::redispatch(dispatchKeySet, self, src, dim, index);
+ }
+
+ // aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
+ inline at::Tensor diagonal_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
+     return at::_ops::diagonal_scatter::redispatch(dispatchKeySet, self, src, offset, dim1, dim2);
+ }
+
+ // aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+ inline at::Tensor as_strided_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+     return at::_ops::as_strided_scatter::redispatch(dispatchKeySet, self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+ }
+
+ // aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+ inline at::Tensor as_strided_scatter_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+     return at::_ops::as_strided_scatter::redispatch(dispatchKeySet, self, src, size, stride, storage_offset);
+ }
+
+ // aten::smm(Tensor self, Tensor mat2) -> Tensor
+ inline at::Tensor smm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
+     return at::_ops::smm::redispatch(dispatchKeySet, self, mat2);
+ }
+
+ // aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+     return at::_ops::softmax_int::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+     return at::_ops::softmax_int_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+     return at::_ops::softmax_int_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+     return at::_ops::softmax_Dimname::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+ inline at::Tensor _softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
+     return at::_ops::_softmax::redispatch(dispatchKeySet, self, dim, half_to_float);
+ }
+
+ // aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
+     return at::_ops::_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out);
+ }
+
+ // aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { + return at::_ops::_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor + inline at::Tensor _softmax_backward_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) { + return at::_ops::_softmax_backward_data::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype); + } + + // aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _softmax_backward_data_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) { + return at::_ops::_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, grad_input); + } + + // aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _softmax_backward_data_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) { + return at::_ops::_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, grad_input); + } + + // aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] + inline ::std::vector unsafe_split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim=0) { + return at::_ops::unsafe_split_Tensor::redispatch(dispatchKeySet, self, split_size, dim); + } + + // aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] + inline ::std::vector unsafe_split_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) { + return at::_ops::unsafe_split_Tensor::redispatch(dispatchKeySet, self, split_size, dim); + } + + // aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] + inline ::std::vector split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim=0) { + return at::_ops::split_Tensor::redispatch(dispatchKeySet, self, split_size, dim); + } + + // aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] + inline ::std::vector split_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) { + return at::_ops::split_Tensor::redispatch(dispatchKeySet, self, split_size, dim); + } + + // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] + inline ::std::vector split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_size, int64_t dim=0) { + return at::_ops::split_sizes::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(split_size), dim); + } + + // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] + inline ::std::vector split_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim=0) { + return at::_ops::split_sizes::redispatch(dispatchKeySet, self, 
split_size, dim); + } + + // aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] + inline ::std::vector unsafe_split_with_sizes(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::unsafe_split_with_sizes::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(split_sizes), dim); + } + + // aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] + inline ::std::vector unsafe_split_with_sizes_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::unsafe_split_with_sizes::redispatch(dispatchKeySet, self, split_sizes, dim); + } + + // aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] + inline ::std::vector split_with_sizes(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(split_sizes), dim); + } + + // aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] + inline ::std::vector split_with_sizes_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes::redispatch(dispatchKeySet, self, split_sizes, dim); + } + + // aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + inline ::std::vector hsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) { + return at::_ops::hsplit_int::redispatch(dispatchKeySet, self, sections); + } + + // aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + inline ::std::vector hsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) { + return at::_ops::hsplit_array::redispatch(dispatchKeySet, self, indices); + } + + // aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + inline ::std::vector vsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) { + return at::_ops::vsplit_int::redispatch(dispatchKeySet, self, sections); + } + + // aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + inline ::std::vector vsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) { + return at::_ops::vsplit_array::redispatch(dispatchKeySet, self, indices); + } + + // aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + inline ::std::vector dsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) { + return at::_ops::dsplit_int::redispatch(dispatchKeySet, self, sections); + } + + // aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + inline ::std::vector dsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) { + return at::_ops::dsplit_array::redispatch(dispatchKeySet, self, indices); + } + + // aten::squeeze(Tensor(a) self) -> Tensor(a) + inline at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::squeeze::redispatch(dispatchKeySet, self); + } + + // aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) + inline at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::squeeze_dim::redispatch(dispatchKeySet, self, dim); + } + + // 
aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) + inline at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) { + return at::_ops::squeeze_dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a) + inline at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::squeeze_dims::redispatch(dispatchKeySet, self, dim); + } + + // aten::squeeze_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::squeeze_::redispatch(dispatchKeySet, self); + } + + // aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) + inline at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) { + return at::_ops::squeeze__dim::redispatch(dispatchKeySet, self, dim); + } + + // aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!) + inline at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::squeeze__dims::redispatch(dispatchKeySet, self, dim); + } + + // aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) + inline at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim) { + return at::_ops::squeeze__dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor sspaddmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sspaddmm::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sspaddmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sspaddmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sspaddmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sspaddmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor + inline at::Tensor _chunk_cat(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, int64_t num_chunks) { + return at::_ops::_chunk_cat::redispatch(dispatchKeySet, tensors, dim, num_chunks); + } + + // aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _chunk_cat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim, int64_t num_chunks) { + return at::_ops::_chunk_cat_out::redispatch(dispatchKeySet, tensors, dim, num_chunks, out); + } + + // aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _chunk_cat_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out) { + return at::_ops::_chunk_cat_out::redispatch(dispatchKeySet, tensors, dim, num_chunks, out); + } + + // aten::stack(Tensor[] tensors, int dim=0) -> Tensor + inline at::Tensor stack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim=0) { + return at::_ops::stack::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & stack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::stack_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & stack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::stack_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::_stack(Tensor[] tensors, int dim=0) -> Tensor + inline at::Tensor _stack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim=0) { + return at::_ops::_stack::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _stack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::_stack_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _stack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::_stack_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::hstack(Tensor[] tensors) -> Tensor + inline at::Tensor hstack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::hstack::redispatch(dispatchKeySet, tensors); + } + + // aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hstack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::hstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hstack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::hstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::vstack(Tensor[] tensors) -> Tensor + inline at::Tensor vstack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::vstack::redispatch(dispatchKeySet, tensors); + } + + // aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & vstack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::vstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & vstack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
+     return at::_ops::vstack_out::redispatch(dispatchKeySet, tensors, out);
+ }
+
+ // aten::dstack(Tensor[] tensors) -> Tensor
+ inline at::Tensor dstack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+     return at::_ops::dstack::redispatch(dispatchKeySet, tensors);
+ }
+
+ // aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & dstack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) {
+     return at::_ops::dstack_out::redispatch(dispatchKeySet, tensors, out);
+ }
+
+ // aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & dstack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
+     return at::_ops::dstack_out::redispatch(dispatchKeySet, tensors, out);
+ }
+
+ // aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
+ inline at::Tensor stft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided=c10::nullopt, c10::optional<bool> return_complex=c10::nullopt) {
+     return at::_ops::stft::redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
+ }
+
+ // aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
+ inline at::Tensor stft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const c10::optional<at::Tensor> & window={}, bool center=true, c10::string_view pad_mode="reflect", bool normalized=false, c10::optional<bool> onesided=c10::nullopt, c10::optional<bool> return_complex=c10::nullopt) {
+     return at::_ops::stft_center::redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
+ }
+
+ // aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
+ inline at::Tensor istft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const c10::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, c10::optional<bool> onesided=c10::nullopt, c10::optional<int64_t> length=c10::nullopt, bool return_complex=false) {
+     return at::_ops::istft::redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
+ }
+
+ // aten::stride.int(Tensor self, int dim) -> int
+ inline int64_t __dispatch_stride(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+     return at::_ops::stride_int::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::stride.Dimname(Tensor self, Dimname dim) -> int
+ inline int64_t stride(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
+     return at::_ops::stride_Dimname::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::sym_stride.int(Tensor self, int dim) -> SymInt
+ inline c10::SymInt __dispatch_sym_stride(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+     return at::_ops::sym_stride_int::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+     return at::_ops::sum::redispatch(dispatchKeySet, self, dtype);
+ }
+
+ // aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+     return at::_ops::sum_dim_IntList::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+ }
+
+ // aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+     return at::_ops::sum_dim_DimnameList::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+ }
+
+ // aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+     return at::_ops::sum_IntList_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+ }
+
+ // aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+     return at::_ops::sum_IntList_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+ }
+
+ // aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::sum_DimnameList_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::sum_DimnameList_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor + inline at::Tensor _nested_sum_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) { + return at::_ops::_nested_sum_backward::redispatch(dispatchKeySet, grad, self, dim, keepdim); + } + + // aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor nansum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::nansum::redispatch(dispatchKeySet, self, dim, keepdim, dtype); + } + + // aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nansum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::nansum_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nansum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::nansum_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor + inline at::Tensor sum_to_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::sum_to_size::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size)); + } + + // aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor + inline at::Tensor sum_to_size_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::sum_to_size::redispatch(dispatchKeySet, self, size); + } + + // aten::sqrt(Tensor self) -> Tensor + inline at::Tensor sqrt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sqrt::redispatch(dispatchKeySet, self); + } + + // aten::sqrt_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sqrt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sqrt_::redispatch(dispatchKeySet, self); + } + + // aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sqrt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sqrt_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sqrt.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & sqrt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sqrt_out::redispatch(dispatchKeySet, self, out); + } + + // aten::square(Tensor self) -> Tensor + inline at::Tensor square(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::square::redispatch(dispatchKeySet, self); + } + + // aten::square_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & square_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::square_::redispatch(dispatchKeySet, self); + } + + // aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & square_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::square_out::redispatch(dispatchKeySet, self, out); + } + + // aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & square_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::square_out::redispatch(dispatchKeySet, self, out); + } + + // aten::std(Tensor self, bool unbiased=True) -> Tensor + inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) { + return at::_ops::std::redispatch(dispatchKeySet, self, unbiased); + } + + // aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor + inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) { + return at::_ops::std_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim); + } + + // aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor + inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::std_correction::redispatch(dispatchKeySet, self, dim, correction, keepdim); + } + + // aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + inline ::std::tuple std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) { + return at::_ops::std_mean::redispatch(dispatchKeySet, self, unbiased); + } + + // aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + inline ::std::tuple std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) { + return at::_ops::std_mean_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim); + } + + // aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) + inline ::std::tuple std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::std_mean_correction::redispatch(dispatchKeySet, self, dim, correction, keepdim); + } + + // aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + inline ::std::tuple std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) { + return at::_ops::std_mean_names_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim); + } + + // aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) + inline ::std::tuple std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::std_mean_correction_names::redispatch(dispatchKeySet, self, dim, correction, keepdim); + } + + // aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & std_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) { + return at::_ops::std_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out); + } + + // aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & std_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) { + return at::_ops::std_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out); + } + + // aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & std_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::std_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out); + } + + // aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & std_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, at::Tensor & out) { + return at::_ops::std_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out); + } + + // aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) { + return at::_ops::std_names_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim); + } + + // aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & std_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) { + return at::_ops::std_names_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out); + } + + // aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & std_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) { + return at::_ops::std_names_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out); + } + + // aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor + inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::std_correction_names::redispatch(dispatchKeySet, self, dim, correction, keepdim); + } + + // aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & std_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::std_correction_names_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out); + } + + // aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & std_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional & correction, bool keepdim, at::Tensor & out) { + return at::_ops::std_correction_names_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out); + } + + // aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor prod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype=c10::nullopt) { + return at::_ops::prod::redispatch(dispatchKeySet, self, dtype); + } + + // aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor prod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::prod_dim_int::redispatch(dispatchKeySet, self, dim, keepdim, dtype); + } + + // aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & prod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::prod_int_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & prod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::prod_int_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + inline at::Tensor prod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::prod_dim_Dimname::redispatch(dispatchKeySet, self, dim, keepdim, dtype); + } + + // aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & prod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::prod_Dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & prod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::prod_Dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::t(Tensor(a) self) -> Tensor(a) + inline at::Tensor t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::t::redispatch(dispatchKeySet, self); + } + + // aten::t_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & t_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::t_::redispatch(dispatchKeySet, self); + } + + // aten::tan(Tensor self) -> Tensor + inline at::Tensor tan(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::tan::redispatch(dispatchKeySet, self); + } + + // aten::tan_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & tan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::tan_::redispatch(dispatchKeySet, self); + } + + // aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tan_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::tan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tan_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::tan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::tanh(Tensor self) -> Tensor + inline at::Tensor tanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::tanh::redispatch(dispatchKeySet, self); + } + + // aten::tanh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & tanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::tanh_::redispatch(dispatchKeySet, self); + } + + // aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tanh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::tanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & tanh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::tanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor + inline at::Tensor tensordot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) { + return at::_ops::tensordot::redispatch(dispatchKeySet, self, other, dims_self, dims_other); + } + + // aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tensordot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) { + return at::_ops::tensordot_out::redispatch(dispatchKeySet, self, other, dims_self, dims_other, out); + } + + // aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tensordot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) { + return at::_ops::tensordot_out::redispatch(dispatchKeySet, self, other, dims_self, dims_other, out); + } + + // aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor + inline at::Tensor threshold(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { + return at::_ops::threshold::redispatch(dispatchKeySet, self, threshold, value); + } + + // aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!) + inline at::Tensor & threshold_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { + return at::_ops::threshold_::redispatch(dispatchKeySet, self, threshold, value); + } + + // aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & threshold_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { + return at::_ops::threshold_out::redispatch(dispatchKeySet, self, threshold, value, out); + } + + // aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & threshold_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) { + return at::_ops::threshold_out::redispatch(dispatchKeySet, self, threshold, value, out); + } + + // aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & threshold_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { + return at::_ops::threshold_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, threshold, grad_input); + } + + // aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & threshold_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) { + return at::_ops::threshold_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, threshold, grad_input); + } + + // aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor + inline at::Tensor threshold_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { + return at::_ops::threshold_backward::redispatch(dispatchKeySet, grad_output, self, threshold); + } + + // aten::tile(Tensor self, SymInt[] dims) -> Tensor + inline at::Tensor tile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::tile::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(dims)); + } + + // aten::tile(Tensor self, SymInt[] dims) -> Tensor + inline at::Tensor tile_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dims) { + return at::_ops::tile::redispatch(dispatchKeySet, self, dims); + } + + // aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) + inline at::Tensor transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::transpose_int::redispatch(dispatchKeySet, self, dim0, dim1); + } + + // aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) + inline at::Tensor transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) { + return at::_ops::transpose_Dimname::redispatch(dispatchKeySet, self, dim0, dim1); + } + + // aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor + inline at::Tensor _mkldnn_transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::_mkldnn_transpose::redispatch(dispatchKeySet, self, dim0, dim1); + } + + // aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) + inline at::Tensor & transpose_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::transpose_::redispatch(dispatchKeySet, self, dim0, dim1); + } + + // aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) 
+ inline at::Tensor & _mkldnn_transpose_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::_mkldnn_transpose_::redispatch(dispatchKeySet, self, dim0, dim1); + } + + // aten::one_hot(Tensor self, int num_classes=-1) -> Tensor + inline at::Tensor one_hot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_classes=-1) { + return at::_ops::one_hot::redispatch(dispatchKeySet, self, num_classes); + } + + // aten::flip(Tensor self, int[] dims) -> Tensor + inline at::Tensor flip(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::flip::redispatch(dispatchKeySet, self, dims); + } + + // aten::fliplr(Tensor self) -> Tensor + inline at::Tensor fliplr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::fliplr::redispatch(dispatchKeySet, self); + } + + // aten::flipud(Tensor self) -> Tensor + inline at::Tensor flipud(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::flipud::redispatch(dispatchKeySet, self); + } + + // aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor + inline at::Tensor roll(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(shifts), dims); + } + + // aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor + inline at::Tensor roll_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll::redispatch(dispatchKeySet, self, shifts, dims); + } + + // aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor + inline at::Tensor rot90(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k=1, at::IntArrayRef dims={0,1}) { + return at::_ops::rot90::redispatch(dispatchKeySet, self, k, dims); + } + + // aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + inline at::Tensor trapezoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) { + return at::_ops::trapezoid_x::redispatch(dispatchKeySet, y, x, dim); + } + + // aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor + inline at::Tensor trapezoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) { + return at::_ops::trapezoid_dx::redispatch(dispatchKeySet, y, dx, dim); + } + + // aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + inline at::Tensor trapz(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) { + return at::_ops::trapz_x::redispatch(dispatchKeySet, y, x, dim); + } + + // aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor + inline at::Tensor trapz(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, double dx=1, int64_t dim=-1) { + return at::_ops::trapz_dx::redispatch(dispatchKeySet, y, dx, dim); + } + + // aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _transform_bias_rescale_qkv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) { + return at::_ops::_transform_bias_rescale_qkv::redispatch(dispatchKeySet, qkv, qkv_bias, num_heads); + } + + // aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor + inline at::Tensor 
_nested_tensor_from_mask(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check=true) { + return at::_ops::_nested_tensor_from_mask::redispatch(dispatchKeySet, t, mask, mask_check); + } + + // aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool + inline bool _nested_tensor_from_mask_left_aligned(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask) { + return at::_ops::_nested_tensor_from_mask_left_aligned::redispatch(dispatchKeySet, t, mask); + } + + // aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor + inline at::Tensor _nested_from_padded(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213=false) { + return at::_ops::_nested_from_padded::redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213); + } + + // aten::_nested_tensor_size(Tensor self) -> Tensor + inline at::Tensor _nested_tensor_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nested_tensor_size::redispatch(dispatchKeySet, self); + } + + // aten::_nested_tensor_strides(Tensor self) -> Tensor + inline at::Tensor _nested_tensor_strides(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nested_tensor_strides::redispatch(dispatchKeySet, self); + } + + // aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor + inline at::Tensor _nested_tensor_storage_offsets(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nested_tensor_storage_offsets::redispatch(dispatchKeySet, self); + } + + // aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor + inline at::Tensor _nested_from_padded_and_nested_example(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example) { + return at::_ops::_nested_from_padded_and_nested_example::redispatch(dispatchKeySet, padded, nt_example); + } + + // aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a) + inline at::Tensor _nested_view_from_buffer(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) { + return at::_ops::_nested_view_from_buffer::redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets); + } + + // aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor + inline at::Tensor _nested_view_from_buffer_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) { + return at::_ops::_nested_view_from_buffer_copy::redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets); + } + + // aten::_nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? 
lengths=None, int ragged_idx=1) -> Tensor(a) + inline at::Tensor _nested_view_from_jagged(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional & lengths={}, int64_t ragged_idx=1) { + return at::_ops::_nested_view_from_jagged::redispatch(dispatchKeySet, self, offsets, dummy, lengths, ragged_idx); + } + + // aten::_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor + inline at::Tensor _nested_view_from_jagged_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional & lengths={}, int64_t ragged_idx=1) { + return at::_ops::_nested_view_from_jagged_copy::redispatch(dispatchKeySet, self, offsets, dummy, lengths, ragged_idx); + } + + // aten::_nested_get_values(Tensor(a) self) -> Tensor(a) + inline at::Tensor _nested_get_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nested_get_values::redispatch(dispatchKeySet, self); + } + + // aten::_nested_get_values_copy(Tensor self) -> Tensor + inline at::Tensor _nested_get_values_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nested_get_values_copy::redispatch(dispatchKeySet, self); + } + + // aten::_nested_get_offsets(Tensor self) -> Tensor + inline at::Tensor _nested_get_offsets(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nested_get_offsets::redispatch(dispatchKeySet, self); + } + + // aten::_nested_get_lengths(Tensor self) -> Tensor + inline at::Tensor _nested_get_lengths(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nested_get_lengths::redispatch(dispatchKeySet, self); + } + + // aten::_nested_get_ragged_idx(Tensor self) -> int + inline int64_t _nested_get_ragged_idx(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nested_get_ragged_idx::redispatch(dispatchKeySet, self); + } + + // aten::_nested_get_jagged_dummy(Tensor any) -> Tensor + inline at::Tensor _nested_get_jagged_dummy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & any) { + return at::_ops::_nested_get_jagged_dummy::redispatch(dispatchKeySet, any); + } + + // aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor + inline at::Tensor _trilinear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1) { + return at::_ops::_trilinear::redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); + } + + // aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor + inline at::Tensor triplet_margin_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin=1.0, double p=2, double eps=1e-06, bool swap=false, int64_t reduction=at::Reduction::Mean) { + return at::_ops::triplet_margin_loss::redispatch(dispatchKeySet, anchor, positive, negative, margin, p, eps, swap, reduction); + } + + // aten::trunc(Tensor self) -> Tensor + inline at::Tensor trunc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + 
return at::_ops::trunc::redispatch(dispatchKeySet, self); + } + + // aten::trunc_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & trunc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::trunc_::redispatch(dispatchKeySet, self); + } + + // aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & trunc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::trunc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & trunc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::trunc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::fix(Tensor self) -> Tensor + inline at::Tensor fix(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::fix::redispatch(dispatchKeySet, self); + } + + // aten::fix_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & fix_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::fix_::redispatch(dispatchKeySet, self); + } + + // aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fix_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::fix_out::redispatch(dispatchKeySet, self, out); + } + + // aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fix_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::fix_out::redispatch(dispatchKeySet, self, out); + } + + // aten::type_as(Tensor self, Tensor other) -> Tensor + inline at::Tensor type_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::type_as::redispatch(dispatchKeySet, self, other); + } + + // aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool + inline bool _has_compatible_shallow_copy_type(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & from) { + return at::_ops::_has_compatible_shallow_copy_type::redispatch(dispatchKeySet, self, from); + } + + // aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor) + inline ::std::tuple _unique(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted=true, bool return_inverse=false) { + return at::_ops::_unique::redispatch(dispatchKeySet, self, sorted, return_inverse); + } + + // aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + inline ::std::tuple unique_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false) { + return at::_ops::unique_dim::redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts); + } + + // aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? 
dim=None) -> (Tensor, Tensor, Tensor) + inline ::std::tuple unique_consecutive(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse=false, bool return_counts=false, c10::optional dim=c10::nullopt) { + return at::_ops::unique_consecutive::redispatch(dispatchKeySet, self, return_inverse, return_counts, dim); + } + + // aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + inline ::std::tuple unique_dim_consecutive(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse=false, bool return_counts=false) { + return at::_ops::unique_dim_consecutive::redispatch(dispatchKeySet, self, dim, return_inverse, return_counts); + } + + // aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _unique2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false) { + return at::_ops::_unique2::redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts); + } + + // aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor + inline at::Tensor _unsafe_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_unsafe_view::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size)); + } + + // aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor + inline at::Tensor _unsafe_view_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::_unsafe_view::redispatch(dispatchKeySet, self, size); + } + + // aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a) + inline at::Tensor unsqueeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::unsqueeze::redispatch(dispatchKeySet, self, dim); + } + + // aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) + inline at::Tensor & unsqueeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) { + return at::_ops::unsqueeze_::redispatch(dispatchKeySet, self, dim); + } + + // aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor + inline at::Tensor vander(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, c10::optional N=c10::nullopt, bool increasing=false) { + return at::_ops::vander::redispatch(dispatchKeySet, x, N, increasing); + } + + // aten::var(Tensor self, bool unbiased=True) -> Tensor + inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) { + return at::_ops::var::redispatch(dispatchKeySet, self, unbiased); + } + + // aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor + inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim); + } + + // aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor + inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::var_correction::redispatch(dispatchKeySet, self, dim, correction, keepdim); + } + + // aten::var.out(Tensor self, int[1]? 
dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & var_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out); + } + + // aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & var_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) { + return at::_ops::var_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out); + } + + // aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & var_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::var_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out); + } + + // aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & var_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, at::Tensor & out) { + return at::_ops::var_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out); + } + + // aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_names_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim); + } + + // aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & var_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_names_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out); + } + + // aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & var_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) { + return at::_ops::var_names_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out); + } + + // aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor + inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::var_correction_names::redispatch(dispatchKeySet, self, dim, correction, keepdim); + } + + // aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
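+
+    // [editorial note] The var/var_mean family above exists in two generations: the
+    // older overloads take a bool `unbiased`, while the `.correction` overloads take
+    // an optional Scalar, where correction=1 corresponds to unbiased=true and
+    // correction=0 to unbiased=false. A hedged sketch, assuming the at::redispatch
+    // namespace as above (`ks` and `t` are placeholders):
+    //
+    //   at::Tensor biased = at::redispatch::var(ks, t, /*unbiased=*/false);
+    //   // equivalent, via the correction overload:
+    //   at::Tensor same = at::redispatch::var(ks, t, /*dim=*/c10::nullopt,
+    //                                         /*correction=*/at::Scalar(0),
+    //                                         /*keepdim=*/false);
+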
+ inline at::Tensor & var_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::var_correction_names_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out); + } + + // aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & var_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional & correction, bool keepdim, at::Tensor & out) { + return at::_ops::var_correction_names_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out); + } + + // aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + inline ::std::tuple var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) { + return at::_ops::var_mean::redispatch(dispatchKeySet, self, unbiased); + } + + // aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + inline ::std::tuple var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_mean_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim); + } + + // aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) + inline ::std::tuple var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::var_mean_correction::redispatch(dispatchKeySet, self, dim, correction, keepdim); + } + + // aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + inline ::std::tuple var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_mean_names_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim); + } + + // aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) + inline ::std::tuple var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::var_mean_correction_names::redispatch(dispatchKeySet, self, dim, correction, keepdim); + } + + // aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a) + inline at::Tensor view_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::view_as::redispatch(dispatchKeySet, self, other); + } + + // aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor + inline at::Tensor where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::where_self::redispatch(dispatchKeySet, condition, self, other); + } + + // aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
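+
+    // [editorial note] Each out-variant op is generated twice: `*_out` takes the
+    // output tensor first (the argument order used for convenience elsewhere in
+    // ATen), while `*_outf` keeps the schema order with `out` last. Both forward to
+    // the same at::_ops::<op>_out::redispatch entry. Sketch, assuming the
+    // at::redispatch namespace as above (cond/self/other/ks are placeholders):
+    //
+    //   at::Tensor out = at::empty_like(self);
+    //   at::redispatch::where_out(ks, out, cond, self, other);    // out-first form
+    //   at::redispatch::where_outf(ks, cond, self, other, out);   // schema-order form
+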
+ inline at::Tensor & where_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::where_self_out::redispatch(dispatchKeySet, condition, self, other, out); + } + + // aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & where_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::where_self_out::redispatch(dispatchKeySet, condition, self, other, out); + } + + // aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor + inline at::Tensor where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::where_ScalarSelf::redispatch(dispatchKeySet, condition, self, other); + } + + // aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor + inline at::Tensor where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::where_ScalarOther::redispatch(dispatchKeySet, condition, self, other); + } + + // aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor + inline at::Tensor where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) { + return at::_ops::where_Scalar::redispatch(dispatchKeySet, condition, self, other); + } + + // aten::where(Tensor condition) -> Tensor[] + inline ::std::vector where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition) { + return at::_ops::where::redispatch(dispatchKeySet, condition); + } + + // aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor + inline at::Tensor norm_except_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, int64_t pow=2, int64_t dim=0) { + return at::_ops::norm_except_dim::redispatch(dispatchKeySet, v, pow, dim); + } + + // aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor + inline at::Tensor _weight_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) { + return at::_ops::_weight_norm::redispatch(dispatchKeySet, v, g, dim); + } + + // aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) + inline ::std::tuple _weight_norm_interface(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) { + return at::_ops::_weight_norm_interface::redispatch(dispatchKeySet, v, g, dim); + } + + // aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) + inline ::std::tuple _weight_norm_interface_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) { + return at::_ops::_weight_norm_interface_backward::redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim); + } + + // aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) + inline ::std::tuple _weight_norm_differentiable_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) { + return 
at::_ops::_weight_norm_differentiable_backward::redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim); + } + + // aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor zeros(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::zeros_names::redispatch(dispatchKeySet, size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor zeros(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::zeros_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory); + } + + // aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _efficientzerotensor(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_efficientzerotensor::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _efficientzerotensor(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_efficientzerotensor::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _efficientzerotensor_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_efficientzerotensor::redispatch(dispatchKeySet, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _efficientzerotensor_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_efficientzerotensor::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory); + } + + // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor zeros(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::zeros::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor zeros(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::zeros::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor zeros_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::zeros::redispatch(dispatchKeySet, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor zeros_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::zeros::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory); + } + + // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) { + return at::_ops::zeros_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::zeros_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::zeros_out::redispatch(dispatchKeySet, size, out); + } + + // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::zeros_out::redispatch(dispatchKeySet, size, out); + } + + // aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor zeros_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::zeros_like::redispatch(dispatchKeySet, self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor
+    inline at::Tensor zeros_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+        return at::_ops::zeros_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
+    }
+
+    // aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
+    inline at::Tensor _standard_gamma_grad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output) {
+        return at::_ops::_standard_gamma_grad::redispatch(dispatchKeySet, self, output);
+    }
+
+    // aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
+    inline at::Tensor _standard_gamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::_standard_gamma::redispatch(dispatchKeySet, self, generator);
+    }
+
+    // aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
+    inline at::Tensor _dirichlet_grad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
+        return at::_ops::_dirichlet_grad::redispatch(dispatchKeySet, x, alpha, total);
+    }
+
+    // aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
+    inline at::Tensor _sample_dirichlet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::_sample_dirichlet::redispatch(dispatchKeySet, self, generator);
+    }
+
+    // aten::poisson(Tensor self, Generator? generator=None) -> Tensor
+    inline at::Tensor poisson(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::poisson::redispatch(dispatchKeySet, self, generator);
+    }
+
+    // aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
+    inline at::Tensor binomial(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::binomial::redispatch(dispatchKeySet, count, prob, generator);
+    }
+
+    // aten::native_norm(Tensor self, Scalar p=2) -> Tensor
+    inline at::Tensor native_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p=2) {
+        return at::_ops::native_norm::redispatch(dispatchKeySet, self, p);
+    }
+
+    // aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType?
dtype) -> Tensor + inline at::Tensor native_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, c10::optional dtype) { + return at::_ops::native_norm_ScalarOpt_dim_dtype::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype); + } + + // aten::_sparse_sum(Tensor self) -> Tensor + inline at::Tensor _sparse_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_sparse_sum::redispatch(dispatchKeySet, self); + } + + // aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor + inline at::Tensor _sparse_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) { + return at::_ops::_sparse_sum_dtype::redispatch(dispatchKeySet, self, dtype); + } + + // aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor + inline at::Tensor _sparse_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::_sparse_sum_dim::redispatch(dispatchKeySet, self, dim); + } + + // aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor + inline at::Tensor _sparse_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) { + return at::_ops::_sparse_sum_dim_dtype::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor + inline at::Tensor _sparse_sum_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::_sparse_sum_backward::redispatch(dispatchKeySet, grad, self, dim); + } + + // aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor _sparse_csr_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_csr_sum_dim_dtype::redispatch(dispatchKeySet, self, dim, keepdim, dtype); + } + + // aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor _sparse_csr_prod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_csr_prod_dim_dtype::redispatch(dispatchKeySet, self, dim, keepdim, dtype); + } + + // aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + inline at::Tensor _sparse_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_softmax_int::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor + inline at::Tensor _sparse_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_softmax_Dimname::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + inline at::Tensor _sparse_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_sparse_softmax::redispatch(dispatchKeySet, self, dim, half_to_float); + } + + // aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor + inline at::Tensor _sparse_softmax_backward_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { + return at::_ops::_sparse_softmax_backward_data::redispatch(dispatchKeySet, grad_output, output, dim, self); + } + + // aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + inline at::Tensor _sparse_log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_log_softmax_int::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor _sparse_log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_log_softmax_Dimname::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + inline at::Tensor _sparse_log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_sparse_log_softmax::redispatch(dispatchKeySet, self, dim, half_to_float); + } + + // aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor + inline at::Tensor _sparse_log_softmax_backward_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { + return at::_ops::_sparse_log_softmax_backward_data::redispatch(dispatchKeySet, grad_output, output, dim, self); + } + + // aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor + inline at::Tensor _spdiags(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional layout=c10::nullopt) { + return at::_ops::_spdiags::redispatch(dispatchKeySet, diagonals, offsets, shape, layout); + } + + // aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor + inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::ScalarType dtype) { + return at::_ops::norm_ScalarOpt_dtype::redispatch(dispatchKeySet, self, p, dtype); + } + + // aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor + inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p=2) { + return at::_ops::norm_Scalar::redispatch(dispatchKeySet, self, p); + } + + // aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? 
p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor + inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) { + return at::_ops::norm_ScalarOpt_dim_dtype::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype); + } + + // aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor + inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::norm_ScalarOpt_dim::redispatch(dispatchKeySet, self, p, dim, keepdim); + } + + // aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) { + return at::_ops::norm_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out); + } + + // aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::norm_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out); + } + + // aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::norm_out::redispatch(dispatchKeySet, self, p, dim, keepdim, out); + } + + // aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::norm_out::redispatch(dispatchKeySet, self, p, dim, keepdim, out); + } + + // aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor + inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) { + return at::_ops::norm_names_ScalarOpt_dim_dtype::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype); + } + + // aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor + inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim=false) { + return at::_ops::norm_names_ScalarOpt_dim::redispatch(dispatchKeySet, self, p, dim, keepdim); + } + + // aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) { + return at::_ops::norm_names_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out); + } + + // aten::norm.names_dtype_out(Tensor self, Scalar? 
p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::norm_names_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out); + } + + // aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim=false) { + return at::_ops::norm_names_out::redispatch(dispatchKeySet, self, p, dim, keepdim, out); + } + + // aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim, at::Tensor & out) { + return at::_ops::norm_names_out::redispatch(dispatchKeySet, self, p, dim, keepdim, out); + } + + // aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent) + inline ::std::tuple frexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::frexp_Tensor::redispatch(dispatchKeySet, self); + } + + // aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent) + inline ::std::tuple frexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self) { + return at::_ops::frexp_Tensor_out::redispatch(dispatchKeySet, self, mantissa, exponent); + } + + // aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent) + inline ::std::tuple frexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) { + return at::_ops::frexp_Tensor_out::redispatch(dispatchKeySet, self, mantissa, exponent); + } + + // aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + inline at::Tensor frobenius_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::frobenius_norm_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & frobenius_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::frobenius_norm_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & frobenius_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::frobenius_norm_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor + inline at::Tensor nuclear_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim=false) { + return at::_ops::nuclear_norm::redispatch(dispatchKeySet, self, keepdim); + } + + // aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & nuclear_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool keepdim=false) { + return at::_ops::nuclear_norm_out::redispatch(dispatchKeySet, self, keepdim, out); + } + + // aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nuclear_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim, at::Tensor & out) { + return at::_ops::nuclear_norm_out::redispatch(dispatchKeySet, self, keepdim, out); + } + + // aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor + inline at::Tensor nuclear_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::nuclear_norm_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nuclear_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::nuclear_norm_dim_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nuclear_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::nuclear_norm_dim_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor clone(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::clone::redispatch(dispatchKeySet, self, memory_format); + } + + // aten::positive(Tensor(a) self) -> Tensor(a) + inline at::Tensor positive(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::positive::redispatch(dispatchKeySet, self); + } + + // aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!) + inline const at::Tensor & resize_as_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_as_::redispatch(dispatchKeySet, self, the_template, memory_format); + } + + // aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!) + inline const at::Tensor & resize_as_sparse_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) { + return at::_ops::resize_as_sparse_::redispatch(dispatchKeySet, self, the_template); + } + + // aten::zero_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & zero_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::zero_::redispatch(dispatchKeySet, self); + } + + // aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sub_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::sub_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & sub_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sub_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor sub(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::sub_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & sub_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::sub__Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor sub(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::sub_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & sub_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::sub__Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & subtract_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::subtract_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & subtract_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::subtract_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor subtract(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::subtract_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & subtract_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::subtract__Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor subtract(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::subtract_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) 
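+
+    // [editorial note] `subtract`, `subtract_`, and `subtract.out` are aliases of the
+    // corresponding `sub` entries and compute self - alpha * other; `rsub` swaps the
+    // operands and computes other - alpha * self. Sketch, assuming the at::redispatch
+    // namespace as above (a, b, ks are placeholders):
+    //
+    //   at::Tensor d1 = at::redispatch::sub(ks, a, b, /*alpha=*/2);   // a - 2 * b
+    //   at::Tensor d2 = at::redispatch::rsub(ks, a, b, /*alpha=*/2);  // b - 2 * a
+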
+ inline at::Tensor & subtract_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::subtract__Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor rsub(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & heaviside_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside_out::redispatch(dispatchKeySet, self, values, out); + } + + // aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & heaviside_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values, at::Tensor & out) { + return at::_ops::heaviside_out::redispatch(dispatchKeySet, self, values, out); + } + + // aten::heaviside(Tensor self, Tensor values) -> Tensor + inline at::Tensor heaviside(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside::redispatch(dispatchKeySet, self, values); + } + + // aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!) + inline at::Tensor & heaviside_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside_::redispatch(dispatchKeySet, self, values); + } + + // aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor rsub(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor _sparse_addmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::_sparse_addmm::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_sampled_addmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sparse_sampled_addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & sparse_sampled_addmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sparse_sampled_addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor sparse_sampled_addmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sparse_sampled_addmm::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor) + inline ::std::tuple _sparse_mm_reduce_impl(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) { + return at::_ops::_sparse_mm_reduce_impl::redispatch(dispatchKeySet, self, other, reduce); + } + + // aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor) + inline ::std::tuple _sparse_mm_reduce_impl_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array output_mask) { + return at::_ops::_sparse_mm_reduce_impl_backward::redispatch(dispatchKeySet, self, grad_out, weight, reduce, arg_out, output_mask); + } + + // aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor addmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmm::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & addmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmm_::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!) 
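+
+    // [editorial note] addmm computes beta * self + alpha * (mat1 @ mat2);
+    // _addmm_activation fuses an activation on top of that result (ReLU by default,
+    // GELU when use_gelu=true). Sketch, assuming the at::redispatch namespace as
+    // above (bias/a/b/ks are placeholders; bias is [p, r], a is [p, q], b is [q, r]):
+    //
+    //   at::Tensor y = at::redispatch::addmm(ks, bias, a, b, /*beta=*/1, /*alpha=*/1);
+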
+ inline at::Tensor & _addmm_activation_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) { + return at::_ops::_addmm_activation_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu, out); + } + + // aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _addmm_activation_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) { + return at::_ops::_addmm_activation_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu, out); + } + + // aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor + inline at::Tensor _addmm_activation(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) { + return at::_ops::_addmm_activation::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu); + } + + // aten::_scaled_mm(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False) -> (Tensor, Tensor) + inline ::std::tuple _scaled_mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const c10::optional & bias={}, c10::optional out_dtype=c10::nullopt, const c10::optional & scale_a={}, const c10::optional & scale_b={}, const c10::optional & scale_result={}, bool use_fast_accum=false) { + return at::_ops::_scaled_mm::redispatch(dispatchKeySet, self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum); + } + + // aten::_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _scaled_mm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & out_amax, const at::Tensor & self, const at::Tensor & mat2, const c10::optional & bias={}, c10::optional out_dtype=c10::nullopt, const c10::optional & scale_a={}, const c10::optional & scale_b={}, const c10::optional & scale_result={}, bool use_fast_accum=false) { + return at::_ops::_scaled_mm_out::redispatch(dispatchKeySet, self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum, out, out_amax); + } + + // aten::_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) 
out_amax) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _scaled_mm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const c10::optional & bias, c10::optional out_dtype, const c10::optional & scale_a, const c10::optional & scale_b, const c10::optional & scale_result, bool use_fast_accum, at::Tensor & out, at::Tensor & out_amax) { + return at::_ops::_scaled_mm_out::redispatch(dispatchKeySet, self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum, out, out_amax); + } + + // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_compressed_tensor_comp_plain_value_size::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_compressed_tensor_comp_plain_value_size::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_compressed_tensor_comp_plain_value_size::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_compressed_tensor_comp_plain_value_size::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_csr_tensor_crow_col_value_size::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_csr_tensor_crow_col_value_size::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_csc_tensor_ccol_row_value_size::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_csc_tensor_ccol_row_value_size::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor sparse_bsr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_bsr_tensor_crow_col_value_size::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_bsr_tensor_crow_col_value_size::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_bsc_tensor_ccol_row_value_size::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_bsc_tensor_ccol_row_value_size::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_compressed_tensor_comp_plain_value::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_compressed_tensor_comp_plain_value::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_csr_tensor_crow_col_value::redispatch(dispatchKeySet, crow_indices, col_indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_csr_tensor_crow_col_value::redispatch(dispatchKeySet, crow_indices, col_indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_csc_tensor_ccol_row_value::redispatch(dispatchKeySet, ccol_indices, row_indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_csc_tensor_ccol_row_value::redispatch(dispatchKeySet, ccol_indices, row_indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor sparse_bsr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_bsr_tensor_crow_col_value::redispatch(dispatchKeySet, crow_indices, col_indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_bsr_tensor_crow_col_value::redispatch(dispatchKeySet, crow_indices, col_indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_bsc_tensor_ccol_row_value::redispatch(dispatchKeySet, ccol_indices, row_indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_bsc_tensor_ccol_row_value::redispatch(dispatchKeySet, ccol_indices, row_indices, values, dtype, layout, device, pin_memory); + } + + // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_compressed_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor _sparse_compressed_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } + + // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_compressed_tensor_unsafe_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_compressed_tensor_unsafe_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_csr_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_csr_tensor_unsafe::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_csr_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_csr_tensor_unsafe::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor _sparse_csc_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_csc_tensor_unsafe::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_csc_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_csc_tensor_unsafe::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_bsr_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_bsr_tensor_unsafe::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_bsr_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_bsr_tensor_unsafe::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_bsc_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_bsc_tensor_unsafe::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor _sparse_bsc_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_bsc_tensor_unsafe::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_coo_tensor_size::redispatch(dispatchKeySet, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_coo_tensor_size::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options={}, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::sparse_coo_tensor_indices::redispatch(dispatchKeySet, indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); + } + + // aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::sparse_coo_tensor_indices::redispatch(dispatchKeySet, indices, values, dtype, layout, device, pin_memory, is_coalesced); + } + + // aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::sparse_coo_tensor_indices_size::redispatch(dispatchKeySet, indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); + } + + // aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::sparse_coo_tensor_indices_size::redispatch(dispatchKeySet, indices, values, size, dtype, layout, device, pin_memory, is_coalesced); + } + + // aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_unsafe::redispatch(dispatchKeySet, indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); + } + + // aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::_sparse_coo_tensor_unsafe::redispatch(dispatchKeySet, indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, is_coalesced); + } + + // aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_unsafe_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_unsafe::redispatch(dispatchKeySet, indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); + } + + // aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_unsafe_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::_sparse_coo_tensor_unsafe::redispatch(dispatchKeySet, indices, values, size, dtype, layout, device, pin_memory, is_coalesced); + } + + // aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? 
is_coalesced=None) -> () + inline void _validate_sparse_coo_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_validate_sparse_coo_tensor_args::redispatch(dispatchKeySet, indices, values, size, is_coalesced); + } + + // aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> () + inline void _validate_sparse_compressed_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) { + return at::_ops::_validate_sparse_compressed_tensor_args::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, layout); + } + + // aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () + inline void _validate_sparse_csr_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_csr_tensor_args::redispatch(dispatchKeySet, crow_indices, col_indices, values, size); + } + + // aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () + inline void _validate_sparse_csc_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_csc_tensor_args::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size); + } + + // aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () + inline void _validate_sparse_bsr_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_bsr_tensor_args::redispatch(dispatchKeySet, crow_indices, col_indices, values, size); + } + + // aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () + inline void _validate_sparse_bsc_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_bsc_tensor_args::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size); + } + + // aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::_sparse_coo_tensor_with_dims::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_coo_tensor_with_dims::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::redispatch(dispatchKeySet, sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::redispatch(dispatchKeySet, sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory, is_coalesced); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? 
is_coalesced=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced); + } + + // aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + inline const at::Tensor & sparse_resize_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim); + } + + // aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + inline const at::Tensor & sparse_resize_and_clear_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_and_clear_::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim); + } + + // aten::sparse_mask(Tensor self, Tensor mask) -> Tensor + inline at::Tensor sparse_mask(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::sparse_mask::redispatch(dispatchKeySet, self, mask); + } + + // aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor + inline at::Tensor _sparse_mask_projection(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches=false) { + return at::_ops::_sparse_mask_projection::redispatch(dispatchKeySet, self, mask, accumulate_matches); + } + + // aten::_to_cpu(Tensor[] tensors) -> Tensor[] + inline ::std::vector _to_cpu(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::_to_cpu::redispatch(dispatchKeySet, tensors); + } + + // aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor + inline at::Tensor to_dense(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype=c10::nullopt, c10::optional masked_grad=c10::nullopt) { + return at::_ops::to_dense::redispatch(dispatchKeySet, self, dtype, masked_grad); + } + + // aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor + inline at::Tensor _to_dense(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype=c10::nullopt, c10::optional masked_grad=c10::nullopt) { + return at::_ops::_to_dense::redispatch(dispatchKeySet, self, dtype, masked_grad); + } + + // aten::to_dense_backward(Tensor grad, Tensor input, bool? 
masked_grad=None) -> Tensor + inline at::Tensor to_dense_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, c10::optional masked_grad=c10::nullopt) { + return at::_ops::to_dense_backward::redispatch(dispatchKeySet, grad, input, masked_grad); + } + + // aten::sparse_dim(Tensor self) -> int + inline int64_t sparse_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sparse_dim::redispatch(dispatchKeySet, self); + } + + // aten::_dimI(Tensor self) -> int + inline int64_t _dimI(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_dimI::redispatch(dispatchKeySet, self); + } + + // aten::dense_dim(Tensor self) -> int + inline int64_t dense_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::dense_dim::redispatch(dispatchKeySet, self); + } + + // aten::_dimV(Tensor self) -> int + inline int64_t _dimV(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_dimV::redispatch(dispatchKeySet, self); + } + + // aten::_nnz(Tensor self) -> int + inline int64_t _nnz(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_nnz::redispatch(dispatchKeySet, self); + } + + // aten::coalesce(Tensor(a) self) -> Tensor(a) + inline at::Tensor coalesce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::coalesce::redispatch(dispatchKeySet, self); + } + + // aten::_coalesce(Tensor self) -> Tensor + inline at::Tensor _coalesce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_coalesce::redispatch(dispatchKeySet, self); + } + + // aten::is_coalesced(Tensor self) -> bool + inline bool is_coalesced(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::is_coalesced::redispatch(dispatchKeySet, self); + } + + // aten::_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor _indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_indices::redispatch(dispatchKeySet, self); + } + + // aten::_values(Tensor(a) self) -> Tensor(a) + inline at::Tensor _values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_values::redispatch(dispatchKeySet, self); + } + + // aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) 
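A small sketch exercising the sparse-COO accessors declared around this point (sparse_dim, dense_dim, _nnz, coalesce, indices, values), built with the public at:: factory functions; the concrete values are arbitrary and the snippet is an editorial illustration only.

    #include <ATen/ATen.h>
    #include <vector>

    void sparse_coo_accessors_example() {
      // Two non-zeros in a 2x3 matrix, at (0, 2) and (1, 0); COO indices are
      // stored as a [sparse_dim, nnz] integer tensor.
      std::vector<int64_t> idx = {0, 1, 2, 0};
      std::vector<double> val = {10.0, 20.0};
      at::Tensor indices = at::tensor(idx, at::kLong).reshape({2, 2});
      at::Tensor values = at::tensor(val);
      at::Tensor s = at::sparse_coo_tensor(indices, values, {2, 3});

      s = s.coalesce();                      // sort indices, sum duplicates
      int64_t sparse_dims = s.sparse_dim();  // 2: both dimensions are sparse
      int64_t dense_dims = s.dense_dim();    // 0: no trailing dense dimensions
      int64_t nnz = s._nnz();                // 2
      at::Tensor i = s.indices();            // public accessors require a coalesced tensor
      at::Tensor v = s.values();
      (void)sparse_dims; (void)dense_dims; (void)nnz; (void)i; (void)v;
    }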
+ inline at::Tensor & _coalesced_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool coalesced) { + return at::_ops::_coalesced_::redispatch(dispatchKeySet, self, coalesced); + } + + // aten::indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::indices::redispatch(dispatchKeySet, self); + } + + // aten::values(Tensor(a) self) -> Tensor(a) + inline at::Tensor values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::values::redispatch(dispatchKeySet, self); + } + + // aten::crow_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor crow_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::crow_indices::redispatch(dispatchKeySet, self); + } + + // aten::col_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor col_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::col_indices::redispatch(dispatchKeySet, self); + } + + // aten::ccol_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor ccol_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::ccol_indices::redispatch(dispatchKeySet, self); + } + + // aten::row_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor row_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::row_indices::redispatch(dispatchKeySet, self); + } + + // aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hspmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) { + return at::_ops::hspmm_out::redispatch(dispatchKeySet, mat1, mat2, out); + } + + // aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hspmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) { + return at::_ops::hspmm_out::redispatch(dispatchKeySet, mat1, mat2, out); + } + + // aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor + inline at::Tensor hspmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2) { + return at::_ops::hspmm::redispatch(dispatchKeySet, mat1, mat2); + } + + // aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) 
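The layout conversions nearby (to_sparse, to_sparse_csr, crow_indices/col_indices) are easiest to see through the corresponding public Tensor methods; a brief sketch, assuming a 2-D strided input (illustrative only).

    #include <ATen/ATen.h>

    void sparse_layout_roundtrip(const at::Tensor& dense2d) {
      at::Tensor coo = dense2d.to_sparse();      // strided -> sparse COO
      at::Tensor back = coo.to_dense();          // and back again

      at::Tensor csr = dense2d.to_sparse_csr();  // strided -> sparse CSR
      at::Tensor crow = csr.crow_indices();      // row pointers, length rows + 1
      at::Tensor col = csr.col_indices();        // column indices, length nnz
      (void)back; (void)crow; (void)col;
    }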
+  inline at::Tensor & copy_sparse_to_sparse_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking=false) {
+    return at::_ops::copy_sparse_to_sparse_::redispatch(dispatchKeySet, self, src, non_blocking);
+  }
+
+  // aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
+  inline ::std::vector<at::Tensor> unbind(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0) {
+    return at::_ops::unbind_int::redispatch(dispatchKeySet, self, dim);
+  }
+
+  // aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
+  inline ::std::vector<at::Tensor> unbind(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
+    return at::_ops::unbind_Dimname::redispatch(dispatchKeySet, self, dim);
+  }
+
+  // aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
+  inline at::Tensor to_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim) {
+    return at::_ops::to_sparse_sparse_dim::redispatch(dispatchKeySet, self, sparse_dim);
+  }
+
+  // aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
+  inline at::Tensor _to_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim) {
+    return at::_ops::_to_sparse_sparse_dim::redispatch(dispatchKeySet, self, sparse_dim);
+  }
+
+  // aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
+  inline at::Tensor to_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt) {
+    return at::_ops::to_sparse::redispatch(dispatchKeySet, self, layout, blocksize, dense_dim);
+  }
+
+  // aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
+  inline at::Tensor _to_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt) {
+    return at::_ops::_to_sparse::redispatch(dispatchKeySet, self, layout, blocksize, dense_dim);
+  }
+
+  // aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
+  inline at::Tensor to_sparse_csr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) {
+    return at::_ops::to_sparse_csr::redispatch(dispatchKeySet, self, dense_dim);
+  }
+
+  // aten::_to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
+  inline at::Tensor _to_sparse_csr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) {
+    return at::_ops::_to_sparse_csr::redispatch(dispatchKeySet, self, dense_dim);
+  }
+
+  // aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
+  inline at::Tensor to_sparse_csc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) {
+    return at::_ops::to_sparse_csc::redispatch(dispatchKeySet, self, dense_dim);
+  }
+
+  // aten::_to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
+  inline at::Tensor _to_sparse_csc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) {
+    return at::_ops::_to_sparse_csc::redispatch(dispatchKeySet, self, dense_dim);
+  }
+
+  // aten::to_sparse_bsr(Tensor self, int[2] blocksize, int?
dense_dim=None) -> Tensor + inline at::Tensor to_sparse_bsr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt) { + return at::_ops::to_sparse_bsr::redispatch(dispatchKeySet, self, blocksize, dense_dim); + } + + // aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + inline at::Tensor _to_sparse_bsr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt) { + return at::_ops::_to_sparse_bsr::redispatch(dispatchKeySet, self, blocksize, dense_dim); + } + + // aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + inline at::Tensor to_sparse_bsc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt) { + return at::_ops::to_sparse_bsc::redispatch(dispatchKeySet, self, blocksize, dense_dim); + } + + // aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + inline at::Tensor _to_sparse_bsc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt) { + return at::_ops::_to_sparse_bsc::redispatch(dispatchKeySet, self, blocksize, dense_dim); + } + + // aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor) + inline ::std::tuple _to_sparse_semi_structured(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dense) { + return at::_ops::_to_sparse_semi_structured::redispatch(dispatchKeySet, dense); + } + + // aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor + inline at::Tensor to_mkldnn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype=c10::nullopt) { + return at::_ops::to_mkldnn::redispatch(dispatchKeySet, self, dtype); + } + + // aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor + inline at::Tensor mkldnn_reorder_conv2d_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt); + } + + // aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? 
input_size=None) -> Tensor + inline at::Tensor mkldnn_reorder_conv2d_weight_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size); + } + + // aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor mkldnn_reorder_conv3d_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::mkldnn_reorder_conv3d_weight::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups); + } + + // aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1) -> Tensor + inline at::Tensor mkldnn_reorder_conv3d_weight_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) { + return at::_ops::mkldnn_reorder_conv3d_weight::redispatch(dispatchKeySet, self, padding, stride, dilation, groups); + } + + // aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor + inline at::Tensor to_mkldnn_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input) { + return at::_ops::to_mkldnn_backward::redispatch(dispatchKeySet, grad, input); + } + + // aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor + inline at::Tensor quantize_per_tensor_dynamic(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range) { + return at::_ops::quantize_per_tensor_dynamic::redispatch(dispatchKeySet, self, dtype, reduce_range); + } + + // aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor + inline at::Tensor quantize_per_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor::redispatch(dispatchKeySet, self, scale, zero_point, dtype); + } + + // aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor + inline at::Tensor quantize_per_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensor_qparams::redispatch(dispatchKeySet, self, scale, zero_point, dtype); + } + + // aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] + inline ::std::vector quantize_per_tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensors::redispatch(dispatchKeySet, tensors, scales, zero_points, dtype); + } + + // aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> 
Tensor + inline at::Tensor quantize_per_channel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) { + return at::_ops::quantize_per_channel::redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype); + } + + // aten::dequantize.self(Tensor self) -> Tensor + inline at::Tensor dequantize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::dequantize_self::redispatch(dispatchKeySet, self); + } + + // aten::dequantize.tensors(Tensor[] tensors) -> Tensor[] + inline ::std::vector<at::Tensor> dequantize(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::dequantize_tensors::redispatch(dispatchKeySet, tensors); + } + + // aten::q_scale(Tensor self) -> float + inline double q_scale(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_scale::redispatch(dispatchKeySet, self); + } + + // aten::q_zero_point(Tensor self) -> int + inline int64_t q_zero_point(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_zero_point::redispatch(dispatchKeySet, self); + } + + // aten::q_per_channel_scales(Tensor self) -> Tensor + inline at::Tensor q_per_channel_scales(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_per_channel_scales::redispatch(dispatchKeySet, self); + } + + // aten::q_per_channel_zero_points(Tensor self) -> Tensor + inline at::Tensor q_per_channel_zero_points(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_per_channel_zero_points::redispatch(dispatchKeySet, self); + } + + // aten::q_per_channel_axis(Tensor self) -> int + inline int64_t q_per_channel_axis(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_per_channel_axis::redispatch(dispatchKeySet, self); + } + + // aten::int_repr(Tensor self) -> Tensor + inline at::Tensor int_repr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::int_repr::redispatch(dispatchKeySet, self); + } + + // aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor + inline at::Tensor _make_per_tensor_quantized_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point) { + return at::_ops::_make_per_tensor_quantized_tensor::redispatch(dispatchKeySet, self, scale, zero_point); + } + + // aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor + inline at::Tensor _make_per_channel_quantized_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) { + return at::_ops::_make_per_channel_quantized_tensor::redispatch(dispatchKeySet, self, scale, zero_point, axis); + } + + // aten::qscheme(Tensor self) -> QScheme + inline at::QScheme qscheme(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::qscheme::redispatch(dispatchKeySet, self); + } + + // aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor + inline at::Tensor fake_quantize_per_tensor_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max); + } + + //
aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor + inline at::Tensor fake_quantize_per_tensor_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max); + } + + // aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max); + } + + // aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + inline ::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) { + return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max); + } + + // aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor + inline at::Tensor fake_quantize_per_tensor_affine_cachemask_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::redispatch(dispatchKeySet, grad, mask); + } + + // aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor + inline at::Tensor _fake_quantize_learnable_per_tensor_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_tensor_affine::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor); + } + + // aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::redispatch(dispatchKeySet, grad, self, scale, zero_point, quant_min, quant_max, grad_factor); + } + + // aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor + inline at::Tensor fake_quantize_per_channel_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor &
zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_channel_affine::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max); + } + + // aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_channel_affine_cachemask::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max); + } + + // aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor + inline at::Tensor fake_quantize_per_channel_affine_cachemask_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask) { + return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::redispatch(dispatchKeySet, grad, mask); + } + + // aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor + inline at::Tensor _fake_quantize_learnable_per_channel_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_channel_affine::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor); + } + + // aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::redispatch(dispatchKeySet, grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor); + } + + // aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor + inline at::Tensor fused_moving_avg_obs_fake_quant(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::fused_moving_avg_obs_fake_quant::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + } + + // aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!)
running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) + inline ::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::_fused_moving_avg_obs_fq_helper::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + } + + // aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int) + inline ::std::tuple<double,int64_t> _choose_qparams_per_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool reduce_range=false) { + return at::_ops::_choose_qparams_per_tensor::redispatch(dispatchKeySet, self, reduce_range); + } + + // aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor + inline at::Tensor _saturate_weight_to_fp16(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight) { + return at::_ops::_saturate_weight_to_fp16::redispatch(dispatchKeySet, weight); + } + + // aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) { + return at::_ops::choose_qparams_optimized::redispatch(dispatchKeySet, input, numel, n_bins, ratio, bit_width); + } + + // aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a) + inline at::Tensor _autocast_to_reduced_precision(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) { + return at::_ops::_autocast_to_reduced_precision::redispatch(dispatchKeySet, self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype); + } + + // aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a) + inline at::Tensor _autocast_to_full_precision(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) { + return at::_ops::_autocast_to_full_precision::redispatch(dispatchKeySet, self, cuda_enabled, cpu_enabled); + } + + // aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor _to_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::_to_copy::redispatch(dispatchKeySet, self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor _to_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::_to_copy::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, non_blocking, memory_format); + } + + // aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::to_dtype_layout::redispatch(dispatchKeySet, self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, copy, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::to_dtype_layout::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format); + } + + // aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::to_device::redispatch(dispatchKeySet, self, device, dtype, non_blocking, copy, memory_format); + } + + // aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::to_dtype::redispatch(dispatchKeySet, self, dtype, non_blocking, copy, memory_format); + } + + // aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat?
memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::to_other::redispatch(dispatchKeySet, self, other, non_blocking, copy, memory_format); + } + + // aten::meshgrid(Tensor[] tensors) -> Tensor[] + inline ::std::vector<at::Tensor> meshgrid(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::meshgrid::redispatch(dispatchKeySet, tensors); + } + + // aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] + inline ::std::vector<at::Tensor> meshgrid(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, c10::string_view indexing) { + return at::_ops::meshgrid_indexing::redispatch(dispatchKeySet, tensors, indexing); + } + + // aten::cartesian_prod(Tensor[] tensors) -> Tensor + inline at::Tensor cartesian_prod(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::cartesian_prod::redispatch(dispatchKeySet, tensors); + } + + // aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor + inline at::Tensor combinations(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t r=2, bool with_replacement=false) { + return at::_ops::combinations::redispatch(dispatchKeySet, self, r, with_replacement); + } + + // aten::item(Tensor self) -> Scalar + inline at::Scalar item(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::item::redispatch(dispatchKeySet, self); + } + + // aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType + inline at::ScalarType result_type(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Tensor & other) { + return at::_ops::result_type_Tensor::redispatch(dispatchKeySet, tensor, other); + } + + // aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType + inline at::ScalarType result_type(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Scalar & other) { + return at::_ops::result_type_Scalar::redispatch(dispatchKeySet, tensor, other); + } + + // aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType + inline at::ScalarType result_type(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar, const at::Tensor & tensor) { + return at::_ops::result_type_Scalar_Tensor::redispatch(dispatchKeySet, scalar, tensor); + } + + // aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType + inline at::ScalarType result_type(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar1, const at::Scalar & scalar2) { + return at::_ops::result_type_Scalar_Scalar::redispatch(dispatchKeySet, scalar1, scalar2); + } + + // aten::can_cast(ScalarType from, ScalarType to) -> bool + inline bool can_cast(c10::DispatchKeySet dispatchKeySet, at::ScalarType from, at::ScalarType to) { + return at::_ops::can_cast::redispatch(dispatchKeySet, from, to); + } + + // aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType + inline at::ScalarType promote_types(c10::DispatchKeySet dispatchKeySet, at::ScalarType type1, at::ScalarType type2) { + return at::_ops::promote_types::redispatch(dispatchKeySet, type1, type2); + } + + // aten::_local_scalar_dense(Tensor self) -> Scalar + inline at::Scalar _local_scalar_dense(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_local_scalar_dense::redispatch(dispatchKeySet, self); + } + + // aten::_lstm_mps(Tensor
input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::_lstm_mps::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) + inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_mps_backward::redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) { + return at::_ops::_thnn_fused_lstm_cell::redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias); + } + + // aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_lstm_cell_backward_impl::redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias); + } + + // aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_lstm_cell_backward::redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias); + } + + // aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor?
hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) { + return at::_ops::_thnn_differentiable_lstm_cell_backward::redispatch(dispatchKeySet, grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy); + } + + // aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) { + return at::_ops::_thnn_fused_gru_cell::redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias); + } + + // aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_gru_cell_backward::redispatch(dispatchKeySet, grad_hy, workspace, has_bias); + } + + // aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) { + return at::_ops::_thnn_differentiable_gru_cell_backward::redispatch(dispatchKeySet, grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias); + } + + // aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_input::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::lstm_data::redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + + // aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor>
gru(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::gru_input::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> gru(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::gru_data::redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + + // aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> rnn_tanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::rnn_tanh_input::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> rnn_tanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::rnn_tanh_data::redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + + // aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> rnn_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::rnn_relu_input::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> rnn_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::rnn_relu_data::redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + + // aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor?
b_hh=None) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> lstm_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::lstm_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh); + } + + // aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + inline at::Tensor gru_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::gru_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh); + } + + // aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + inline at::Tensor rnn_tanh_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::rnn_tanh_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh); + } + + // aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + inline at::Tensor rnn_relu_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::rnn_relu_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh); + } + + // aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + return at::_ops::quantized_lstm_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + + // aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + inline at::Tensor quantized_gru_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + return at::_ops::quantized_gru_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh,
b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + + // aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + inline at::Tensor quantized_rnn_relu_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + return at::_ops::quantized_rnn_relu_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + + // aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + inline at::Tensor quantized_rnn_tanh_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + return at::_ops::quantized_rnn_tanh_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + + // aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first) { + return at::_ops::_pack_padded_sequence::redispatch(dispatchKeySet, input, lengths, batch_first); + } + + // aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor + inline at::Tensor _pack_padded_sequence_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) { + return at::_ops::_pack_padded_sequence_backward::redispatch(dispatchKeySet, grad, c10::fromIntArrayRefSlow(input_size), batch_sizes, batch_first); + } + + // aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor + inline at::Tensor _pack_padded_sequence_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) { + return at::_ops::_pack_padded_sequence_backward::redispatch(dispatchKeySet, grad, input_size, batch_sizes, batch_first); + } + + // aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor>
_pad_packed_sequence(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) { + return at::_ops::_pad_packed_sequence::redispatch(dispatchKeySet, data, batch_sizes, batch_first, padding_value, total_length); + } + + // aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) + inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source) { + return at::_ops::set__source_Storage::redispatch(dispatchKeySet, self, source); + } + + // aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) { + return at::_ops::set__source_Storage_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); + } + + // aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + inline at::Tensor & set__symint(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) { + return at::_ops::set__source_Storage_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, size, stride); + } + + // aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) { + return at::_ops::set__source_Tensor_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); + } + + // aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + inline at::Tensor & set__symint(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) { + return at::_ops::set__source_Tensor_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, size, stride); + } + + // aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) + inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source) { + return at::_ops::set__source_Tensor::redispatch(dispatchKeySet, self, source); + } + + // aten::set_(Tensor(a!) self) -> Tensor(a!) 
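// NOTE: Ops whose schema uses SymInt are emitted twice in this header: a plain overload
// that takes int64_t/at::IntArrayRef and converts through c10::fromIntArrayRefSlow before
// redispatching, and a *_symint overload that forwards c10::SymInt values unchanged.
// A minimal, hedged sketch of the difference (the key set `ks`, tensor `self_`, and
// storage `storage_` are hypothetical values, not defined by this header):
//
//   set_(ks, self_, storage_, /*storage_offset=*/0, /*size=*/{2, 3});   // int path, converted to SymInt
//   set__symint(ks, self_, storage_, c10::SymInt(0),
//               {c10::SymInt(2), c10::SymInt(3)});                      // SymInt path, no conversion
//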
+ inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::set_::redispatch(dispatchKeySet, self); + } + + // aten::lift(Tensor self) -> Tensor + inline at::Tensor lift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::lift::redispatch(dispatchKeySet, self); + } + + // aten::lift_fresh(Tensor(a) self) -> Tensor(a) + inline at::Tensor lift_fresh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::lift_fresh::redispatch(dispatchKeySet, self); + } + + // aten::lift_fresh_copy(Tensor self) -> Tensor + inline at::Tensor lift_fresh_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::lift_fresh_copy::redispatch(dispatchKeySet, self); + } + + // aten::is_set_to(Tensor self, Tensor tensor) -> bool + inline bool is_set_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor) { + return at::_ops::is_set_to::redispatch(dispatchKeySet, self, tensor); + } + + // aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) + inline at::Tensor & masked_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + return at::_ops::masked_fill__Scalar::redispatch(dispatchKeySet, self, mask, value); + } + + // aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor + inline at::Tensor masked_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + return at::_ops::masked_fill_Scalar::redispatch(dispatchKeySet, self, mask, value); + } + + // aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) + inline at::Tensor & masked_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + return at::_ops::masked_fill__Tensor::redispatch(dispatchKeySet, self, mask, value); + } + + // aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor + inline at::Tensor masked_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + return at::_ops::masked_fill_Tensor::redispatch(dispatchKeySet, self, mask, value); + } + + // aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) 
+ inline at::Tensor & masked_scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + return at::_ops::masked_scatter_::redispatch(dispatchKeySet, self, mask, source); + } + + // aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor + inline at::Tensor masked_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + return at::_ops::masked_scatter::redispatch(dispatchKeySet, self, mask, source); + } + + // aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor + inline at::Tensor masked_scatter_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes) { + return at::_ops::masked_scatter_backward::redispatch(dispatchKeySet, grad_output, mask, c10::fromIntArrayRefSlow(sizes)); + } + + // aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor + inline at::Tensor masked_scatter_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) { + return at::_ops::masked_scatter_backward::redispatch(dispatchKeySet, grad_output, mask, sizes); + } + + // aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor + inline at::Tensor _masked_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> mask_type=c10::nullopt) { + return at::_ops::_masked_softmax::redispatch(dispatchKeySet, self, mask, dim, mask_type); + } + + // aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor + inline at::Tensor _masked_softmax_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt) { + return at::_ops::_masked_softmax_backward::redispatch(dispatchKeySet, grad_output, output, mask, dim); + } + + // aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a) + inline at::Tensor view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::view::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size)); + } + + // aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a) + inline at::Tensor view_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::view::redispatch(dispatchKeySet, self, size); + } + + // aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a) + inline at::Tensor view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) { + return at::_ops::view_dtype::redispatch(dispatchKeySet, self, dtype); + } + + // aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
+ inline at::Tensor & put_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false) { + return at::_ops::put_::redispatch(dispatchKeySet, self, index, source, accumulate); + } + + // aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor + inline at::Tensor put(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false) { + return at::_ops::put::redispatch(dispatchKeySet, self, index, source, accumulate); + } + + // aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_add_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_out::redispatch(dispatchKeySet, self, dim, index, source, alpha, out); + } + + // aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_add_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::index_add_out::redispatch(dispatchKeySet, self, dim, index, source, alpha, out); + } + + // aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & index_add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_::redispatch(dispatchKeySet, self, dim, index, source, alpha); + } + + // aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + inline at::Tensor index_add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add::redispatch(dispatchKeySet, self, dim, index, source, alpha); + } + + // aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + inline at::Tensor index_add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_dimname::redispatch(dispatchKeySet, self, dim, index, source, alpha); + } + + // aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_reduce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce_out::redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self, out); + } + + // aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) 
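// NOTE: Every out= op in this header is wrapped twice: an *_out overload that takes the
// result tensor first (so the trailing defaulted arguments can be omitted), and an *_outf
// overload that keeps the schema's argument order with `out` last. Both forward to the
// same _ops::..._out::redispatch entry. A hedged usage sketch (the key set `ks` and the
// tensors `out_`, `self_`, `idx`, `src` are hypothetical values, not part of this header):
//
//   index_add_out(ks, out_, self_, /*dim=*/0, idx, src);                  // alpha defaults to 1
//   index_add_outf(ks, self_, /*dim=*/0, idx, src, /*alpha=*/1, out_);    // schema order, out last
//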
+ inline at::Tensor & index_reduce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::index_reduce_out::redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self, out); + } + + // aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!) + inline at::Tensor & index_reduce_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce_::redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self); + } + + // aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor + inline at::Tensor index_reduce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce::redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self); + } + + // aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + inline at::Tensor & index_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill__int_Scalar::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + inline at::Tensor index_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) + inline at::Tensor & index_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill__int_Tensor::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor + inline at::Tensor index_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) + inline at::Tensor & index_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill__Dimname_Scalar::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) 
+ inline at::Tensor & index_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill__Dimname_Tensor::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + inline at::Tensor index_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_Dimname_Scalar::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor + inline at::Tensor index_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_Dimname_Tensor::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_src::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) + inline at::Tensor & scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter__src::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_src_out::redispatch(dispatchKeySet, self, dim, index, src, out); + } + + // aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) { + return at::_ops::scatter_src_out::redispatch(dispatchKeySet, self, dim, index, src, out); + } + + // aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter_value::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + inline at::Tensor & scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter__value::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter_value_out::redispatch(dispatchKeySet, self, dim, index, value, out); + } + + // aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { + return at::_ops::scatter_value_out::redispatch(dispatchKeySet, self, dim, index, value, out); + } + + // aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + return at::_ops::scatter_reduce::redispatch(dispatchKeySet, self, dim, index, src, reduce); + } + + // aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!) + inline at::Tensor & scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + return at::_ops::scatter__reduce::redispatch(dispatchKeySet, self, dim, index, src, reduce); + } + + // aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + return at::_ops::scatter_reduce_out::redispatch(dispatchKeySet, self, dim, index, src, reduce, out); + } + + // aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) { + return at::_ops::scatter_reduce_out::redispatch(dispatchKeySet, self, dim, index, src, reduce, out); + } + + // aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + return at::_ops::scatter_value_reduce::redispatch(dispatchKeySet, self, dim, index, value, reduce); + } + + // aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!) + inline at::Tensor & scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + return at::_ops::scatter__value_reduce::redispatch(dispatchKeySet, self, dim, index, value, reduce); + } + + // aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) 
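// NOTE: The `reduce` string accepted by the two scatter families above differs; the values
// below reflect the aten schemas as documented for recent PyTorch releases and are not
// restated in this header, so treat them as an assumption to verify against native_functions.yaml:
// scatter/scatter_ with a `reduce` argument accept "add" and "multiply", while
// scatter_reduce/scatter_reduce_ (the .two variants further down) accept "sum", "prod",
// "mean", "amax" and "amin" together with the `include_self` flag. Illustrative call,
// with `ks`, `self_`, `idx`, `src` being hypothetical values:
//
//   at::Tensor summed = scatter_reduce(ks, self_, /*dim=*/0, idx, src, "sum", /*include_self=*/false);
//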
+ inline at::Tensor & scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + return at::_ops::scatter_value_reduce_out::redispatch(dispatchKeySet, self, dim, index, value, reduce, out); + } + + // aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) { + return at::_ops::scatter_value_reduce_out::redispatch(dispatchKeySet, self, dim, index, value, reduce, out); + } + + // aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_dimname_src::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter_dimname_value::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + inline at::Tensor scatter_add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_add::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) + inline at::Tensor & scatter_add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_add_::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_add_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_add_out::redispatch(dispatchKeySet, self, dim, index, src, out); + } + + // aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & scatter_add_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) { + return at::_ops::scatter_add_out::redispatch(dispatchKeySet, self, dim, index, src, out); + } + + // aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + inline at::Tensor scatter_add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_add_dimname::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor + inline at::Tensor scatter_reduce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce_two::redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self); + } + + // aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!) + inline at::Tensor & scatter_reduce_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce__two::redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self); + } + + // aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_reduce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce_two_out::redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self, out); + } + + // aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_reduce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::scatter_reduce_two_out::redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self, out); + } + + // aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & eq_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::eq__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & eq_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::eq__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_and_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & bitwise_and_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_and_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_and_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_and_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_and(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_and_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_and(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_and_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_and(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_and_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & bitwise_and_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_and__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_and_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_and__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __and__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__and___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __and__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__and___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & __iand__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__iand___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __iand__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__iand___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & bitwise_or_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_or_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_or_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_or_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_or_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_or(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_or_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_or(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_or_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_or(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_or_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & bitwise_or_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_or__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_or_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_or__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __or__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__or___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __or__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__or___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & __ior__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__ior___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+ inline at::Tensor & __ior__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__ior___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_xor_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_xor_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_xor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_xor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_xor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_xor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_xor(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_xor_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_xor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_xor_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & bitwise_xor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_xor__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_xor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_xor__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __xor__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__xor___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __xor__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__xor___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ inline at::Tensor & __ixor__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__ixor___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __ixor__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__ixor___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __lshift__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__lshift___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __lshift__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__lshift___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & __ilshift__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__ilshift___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __ilshift__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__ilshift___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_left_shift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_left_shift_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_left_shift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_left_shift_Tensor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ inline at::Tensor & bitwise_left_shift_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_left_shift__Tensor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_left_shift_Tensor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_left_shift_Tensor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_left_shift(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __rshift__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__rshift___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __rshift__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__rshift___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & __irshift__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__irshift___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __irshift__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__irshift___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_right_shift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & bitwise_right_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_right_shift_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_right_shift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_right_shift_Tensor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_right_shift__Tensor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_right_shift_Tensor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_right_shift_Tensor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_right_shift(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + inline at::Tensor & tril_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal=0) { + return at::_ops::tril_::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + inline at::Tensor & triu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal=0) { + return at::_ops::triu_::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::digamma_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & digamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::digamma_::redispatch(dispatchKeySet, self); + } + + // aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) + inline at::Tensor & lerp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + return at::_ops::lerp__Scalar::redispatch(dispatchKeySet, self, end, weight); + } + + // aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) + inline at::Tensor & lerp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + return at::_ops::lerp__Tensor::redispatch(dispatchKeySet, self, end, weight); + } + + // aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) 
+ inline at::Tensor & addbmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm_::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha); + } + + // aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addbmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm_out::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out); + } + + // aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addbmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addbmm_out::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out); + } + + // aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor addbmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha); + } + + // aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & random_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::random__from::redispatch(dispatchKeySet, self, from, to, generator); + } + + // aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & random_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::random__to::redispatch(dispatchKeySet, self, to, generator); + } + + // aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & random_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::random_::redispatch(dispatchKeySet, self, generator); + } + + // aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & uniform_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::uniform_::redispatch(dispatchKeySet, self, from, to, generator); + } + + // aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & cauchy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::cauchy_::redispatch(dispatchKeySet, self, median, sigma, generator); + }
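+ + // Usage sketch (illustrative, not generated): the in-place RNG wrappers above are thin pass-throughs that forward an explicit DispatchKeySet to the op's redispatch entry point. Assuming a key set `ks` and a float tensor `t` are already in scope, an in-place fill with uniform noise could look like: + // uniform_(ks, t, /*from=*/0.0, /*to=*/1.0, /*generator=*/c10::nullopt); + + // aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)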
+ inline at::Tensor & log_normal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::log_normal_::redispatch(dispatchKeySet, self, mean, std, generator); + } + + // aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & exponential_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::exponential_::redispatch(dispatchKeySet, self, lambd, generator); + } + + // aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & geometric_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::geometric_::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diag_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::diag_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diag_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) { + return at::_ops::diag_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::diag(Tensor self, int diagonal=0) -> Tensor + inline at::Tensor diag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::diag::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cross_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt) { + return at::_ops::cross_out::redispatch(dispatchKeySet, self, other, dim, out); + } + + // aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cross_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) { + return at::_ops::cross_out::redispatch(dispatchKeySet, self, other, dim, out); + } + + // aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor + inline at::Tensor cross(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt) { + return at::_ops::cross::redispatch(dispatchKeySet, self, other, dim); + } + + // aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & triu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::triu_out::redispatch(dispatchKeySet, self, diagonal, out); + }
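+ + // Usage sketch (illustrative, not generated): each out-variant in this listing is exposed twice, an `_out` overload that takes `out` first and an `_outf` overload that takes `out` last; both forward to the same at::_ops redispatch call. Assuming `ks`, a source tensor `src`, and a preallocated `result` are in scope: + // triu_out(ks, result, src, /*diagonal=*/1); // out argument leads + // triu_outf(ks, src, /*diagonal=*/1, result); // out argument trails, same redispatch + + // aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)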
+ inline at::Tensor & triu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) { + return at::_ops::triu_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::triu(Tensor self, int diagonal=0) -> Tensor + inline at::Tensor triu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::triu::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tril_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::tril_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tril_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) { + return at::_ops::tril_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::tril(Tensor self, int diagonal=0) -> Tensor + inline at::Tensor tril(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::tril::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor tril_indices(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong) { + return at::_ops::tril_indices::redispatch(dispatchKeySet, row, col, offset, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor tril_indices(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::tril_indices::redispatch(dispatchKeySet, row, col, offset, dtype, layout, device, pin_memory); + } + + // aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor triu_indices(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong) { + return at::_ops::triu_indices::redispatch(dispatchKeySet, row, col, offset, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor triu_indices(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::triu_indices::redispatch(dispatchKeySet, row, col, offset, dtype, layout, device, pin_memory); + } + + // aten::trace(Tensor self) -> Tensor + inline at::Tensor trace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::trace::redispatch(dispatchKeySet, self); + } + + // aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor + inline at::Tensor trace_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef sizes) { + return at::_ops::trace_backward::redispatch(dispatchKeySet, grad, c10::fromIntArrayRefSlow(sizes)); + } + + // aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor + inline at::Tensor trace_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef sizes) { + return at::_ops::trace_backward::redispatch(dispatchKeySet, grad, sizes); + } + + // aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ne_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ne_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ne_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::ne_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ne.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor ne(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ne_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ne_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ne_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ne_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ne_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ne.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor ne(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ne_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & ne_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::ne__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & ne_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::ne__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & not_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::not_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & not_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::not_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor not_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::not_equal_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & not_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::not_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & not_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::not_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor not_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::not_equal_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & not_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::not_equal__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & not_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::not_equal__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::eq_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eq_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::eq_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::eq.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor eq(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::eq_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::eq_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & eq_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::eq_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::eq.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor eq(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::eq_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ge_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ge_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::ge_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ge.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor ge(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ge_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ge_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ge_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ge.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor ge(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & ge_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & ge_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & greater_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::greater_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor greater_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_equal_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::greater_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor greater_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_equal_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & greater_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_equal__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & greater_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_equal__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & le_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::le_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & le_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::le_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::le.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor le(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::le_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & le_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::le_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & le_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::le_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::le.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor le(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::le_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & le_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::le__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & le_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::le__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::less_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor less_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_equal_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::less_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor less_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_equal_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & less_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_equal__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & less_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_equal__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & gt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::gt_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gt.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor gt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::gt_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gt.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor gt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & gt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & gt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::greater_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor greater(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & greater_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::greater_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor greater(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & greater_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & greater_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::lt_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lt.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor lt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::lt_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lt.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor lt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & lt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & lt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & less_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::less_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor less(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::less_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor less(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & less_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::less__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & less_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::less__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & take_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & index) { + return at::_ops::take_out::redispatch(dispatchKeySet, self, index, out); + } + + // aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & take_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, at::Tensor & out) { + return at::_ops::take_out::redispatch(dispatchKeySet, self, index, out); + } + + // aten::take(Tensor self, Tensor index) -> Tensor + inline at::Tensor take(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index) { + return at::_ops::take::redispatch(dispatchKeySet, self, index); + } + + // aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & take_along_dim_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::optional dim=c10::nullopt) { + return at::_ops::take_along_dim_out::redispatch(dispatchKeySet, self, indices, dim, out); + } + + // aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & take_along_dim_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional dim, at::Tensor & out) { + return at::_ops::take_along_dim_out::redispatch(dispatchKeySet, self, indices, dim, out); + } + + // aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor + inline at::Tensor take_along_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional dim=c10::nullopt) { + return at::_ops::take_along_dim::redispatch(dispatchKeySet, self, indices, dim); + } + + // aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_select_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_select_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) { + return at::_ops::index_select_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::index_select(Tensor self, int dim, Tensor index) -> Tensor + inline at::Tensor index_select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_select_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + return at::_ops::index_select_dimname_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_select_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) { + return at::_ops::index_select_dimname_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor + inline at::Tensor index_select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + return at::_ops::index_select_dimname::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor + inline at::Tensor index_select_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::redispatch(dispatchKeySet, grad, c10::fromIntArrayRefSlow(self_sizes), dim, index); + } + + // aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor + inline at::Tensor index_select_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::redispatch(dispatchKeySet, grad, self_sizes, dim, index); + } + + // aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & masked_select_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::masked_select_out::redispatch(dispatchKeySet, self, mask, out); + } + + // aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & masked_select_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { + return at::_ops::masked_select_out::redispatch(dispatchKeySet, self, mask, out); + } + + // aten::masked_select(Tensor self, Tensor mask) -> Tensor + inline at::Tensor masked_select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::masked_select::redispatch(dispatchKeySet, self, mask); + } + + // aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor + inline at::Tensor masked_select_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) { + return at::_ops::masked_select_backward::redispatch(dispatchKeySet, grad, input, mask); + } + + // aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nonzero_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::nonzero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nonzero_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::nonzero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::nonzero(Tensor self) -> Tensor + inline at::Tensor nonzero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::nonzero::redispatch(dispatchKeySet, self); + } + + // aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nonzero_static_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t size, int64_t fill_value=-1) { + return at::_ops::nonzero_static_out::redispatch(dispatchKeySet, self, size, fill_value, out); + } + + // aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nonzero_static_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, int64_t fill_value, at::Tensor & out) { + return at::_ops::nonzero_static_out::redispatch(dispatchKeySet, self, size, fill_value, out); + } + + // aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor + inline at::Tensor nonzero_static(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, int64_t fill_value=-1) { + return at::_ops::nonzero_static::redispatch(dispatchKeySet, self, size, fill_value); + } + + // aten::nonzero_numpy(Tensor self) -> Tensor[] + inline ::std::vector nonzero_numpy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::nonzero_numpy::redispatch(dispatchKeySet, self); + } + + // aten::argwhere(Tensor self) -> Tensor + inline at::Tensor argwhere(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::argwhere::redispatch(dispatchKeySet, self); + } + + // aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & gather_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_out::redispatch(dispatchKeySet, self, dim, index, sparse_grad, out); + } + + // aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gather_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { + return at::_ops::gather_out::redispatch(dispatchKeySet, self, dim, index, sparse_grad, out); + } + + // aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor + inline at::Tensor gather(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather::redispatch(dispatchKeySet, self, dim, index, sparse_grad); + } + + // aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor + inline at::Tensor gather_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) { + return at::_ops::gather_backward::redispatch(dispatchKeySet, grad, self, dim, index, sparse_grad); + } + + // aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gather_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_dimname_out::redispatch(dispatchKeySet, self, dim, index, sparse_grad, out); + } + + // aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gather_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { + return at::_ops::gather_dimname_out::redispatch(dispatchKeySet, self, dim, index, sparse_grad, out); + } + + // aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor + inline at::Tensor gather(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_dimname::redispatch(dispatchKeySet, self, dim, index, sparse_grad); + } + + // aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor + inline at::Tensor _gather_sparse_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) { + return at::_ops::_gather_sparse_backward::redispatch(dispatchKeySet, self, dim, index, grad); + } + + // aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addcmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcmul_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) 
+    inline at::Tensor & addcmul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
+        return at::_ops::addcmul_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
+    }
+
+    // aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
+    inline at::Tensor addcmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+        return at::_ops::addcmul::redispatch(dispatchKeySet, self, tensor1, tensor2, value);
+    }
+
+    // aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
+    inline at::Tensor & addcmul_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+        return at::_ops::addcmul_::redispatch(dispatchKeySet, self, tensor1, tensor2, value);
+    }
+
+    // aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & addcdiv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+        return at::_ops::addcdiv_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
+    }
+
+    // aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & addcdiv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
+        return at::_ops::addcdiv_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
+    }
+
+    // aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
+    inline at::Tensor addcdiv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+        return at::_ops::addcdiv::redispatch(dispatchKeySet, self, tensor1, tensor2, value);
+    }
+
+    // aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
+    inline at::Tensor & addcdiv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+        return at::_ops::addcdiv_::redispatch(dispatchKeySet, self, tensor1, tensor2, value);
+    }
+
+    // aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
+    inline at::Tensor cross_entropy_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100, double label_smoothing=0.0) {
+        return at::_ops::cross_entropy_loss::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, label_smoothing);
+    }
+
+    // aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
+    inline at::Tensor cross_entropy_loss_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100, double label_smoothing=0.0) {
+        return at::_ops::cross_entropy_loss::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, label_smoothing);
+    }
+
+    // aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) {
+        return at::_ops::triangular_solve_X::redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular, X, M);
+    }
+
+    // aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
+        return at::_ops::triangular_solve_X::redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular, X, M);
+    }
+
+    // aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
+    inline ::std::tuple<at::Tensor,at::Tensor> triangular_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) {
+        return at::_ops::triangular_solve::redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular);
+    }
+
+    // aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
+    inline void _linalg_check_errors(c10::DispatchKeySet dispatchKeySet, const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
+        return at::_ops::_linalg_check_errors::redispatch(dispatchKeySet, info, api_name, is_matrix);
+    }
+
+    // aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_solve_triangular_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) {
+        return at::_ops::linalg_solve_triangular_out::redispatch(dispatchKeySet, self, B, upper, left, unitriangular, out);
+    }
+
+    // aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_solve_triangular_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) { + return at::_ops::linalg_solve_triangular_out::redispatch(dispatchKeySet, self, B, upper, left, unitriangular, out); + } + + // aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor + inline at::Tensor linalg_solve_triangular(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) { + return at::_ops::linalg_solve_triangular::redispatch(dispatchKeySet, self, B, upper, left, unitriangular); + } + + // aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor + inline at::Tensor linalg_vander(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, c10::optional N=c10::nullopt) { + return at::_ops::linalg_vander::redispatch(dispatchKeySet, x, N.has_value() ? c10::make_optional(c10::SymInt(*N)) : c10::nullopt); + } + + // aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor + inline at::Tensor linalg_vander_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, c10::optional N=c10::nullopt) { + return at::_ops::linalg_vander::redispatch(dispatchKeySet, x, N); + } + + // aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) + inline ::std::tuple svd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some=true, bool compute_uv=true) { + return at::_ops::svd_U::redispatch(dispatchKeySet, self, some, compute_uv, U, S, V); + } + + // aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) + inline ::std::tuple svd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) { + return at::_ops::svd_U::redispatch(dispatchKeySet, self, some, compute_uv, U, S, V); + } + + // aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) + inline ::std::tuple svd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some=true, bool compute_uv=true) { + return at::_ops::svd::redispatch(dispatchKeySet, self, some, compute_uv); + } + + // aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) + inline at::Tensor swapaxes(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t axis0, int64_t axis1) { + return at::_ops::swapaxes::redispatch(dispatchKeySet, self, axis0, axis1); + } + + // aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) + inline at::Tensor & swapaxes_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t axis0, int64_t axis1) { + return at::_ops::swapaxes_::redispatch(dispatchKeySet, self, axis0, axis1); + } + + // aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) + inline at::Tensor swapdims(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::swapdims::redispatch(dispatchKeySet, self, dim0, dim1); + } + + // aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) 
+    inline at::Tensor & swapdims_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
+        return at::_ops::swapdims_::redispatch(dispatchKeySet, self, dim0, dim1);
+    }
+
+    // aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cholesky_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool upper=false) {
+        return at::_ops::cholesky_out::redispatch(dispatchKeySet, self, upper, out);
+    }
+
+    // aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cholesky_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
+        return at::_ops::cholesky_out::redispatch(dispatchKeySet, self, upper, out);
+    }
+
+    // aten::cholesky(Tensor self, bool upper=False) -> Tensor
+    inline at::Tensor cholesky(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper=false) {
+        return at::_ops::cholesky::redispatch(dispatchKeySet, self, upper);
+    }
+
+    // aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cholesky_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, bool upper=false) {
+        return at::_ops::cholesky_solve_out::redispatch(dispatchKeySet, self, input2, upper, out);
+    }
+
+    // aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cholesky_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {
+        return at::_ops::cholesky_solve_out::redispatch(dispatchKeySet, self, input2, upper, out);
+    }
+
+    // aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
+    inline at::Tensor cholesky_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper=false) {
+        return at::_ops::cholesky_solve::redispatch(dispatchKeySet, self, input2, upper);
+    }
+
+    // aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
+    inline at::Tensor _cholesky_solve_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper) {
+        return at::_ops::_cholesky_solve_helper::redispatch(dispatchKeySet, self, A, upper);
+    }
+
+    // aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
+    inline at::Tensor cholesky_inverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper=false) {
+        return at::_ops::cholesky_inverse::redispatch(dispatchKeySet, self, upper);
+    }
+
+    // aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cholesky_inverse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool upper=false) {
+        return at::_ops::cholesky_inverse_out::redispatch(dispatchKeySet, self, upper, out);
+    }
+
+    // aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cholesky_inverse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
+        return at::_ops::cholesky_inverse_out::redispatch(dispatchKeySet, self, upper, out);
+    }
+
+    // aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> qr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & Q, at::Tensor & R, const at::Tensor & self, bool some=true) {
+        return at::_ops::qr_Q::redispatch(dispatchKeySet, self, some, Q, R);
+    }
+
+    // aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> qr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
+        return at::_ops::qr_Q::redispatch(dispatchKeySet, self, some, Q, R);
+    }
+
+    // aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
+    inline ::std::tuple<at::Tensor,at::Tensor> qr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some=true) {
+        return at::_ops::qr::redispatch(dispatchKeySet, self, some);
+    }
+
+    // aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & a, at::Tensor & tau, const at::Tensor & self) {
+        return at::_ops::geqrf_a::redispatch(dispatchKeySet, self, a, tau);
+    }
+
+    // aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
+        return at::_ops::geqrf_a::redispatch(dispatchKeySet, self, a, tau);
+    }
+
+    // aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
+    inline ::std::tuple<at::Tensor,at::Tensor> geqrf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::geqrf::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::orgqr(Tensor self, Tensor input2) -> Tensor
+    inline at::Tensor orgqr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2) {
+        return at::_ops::orgqr::redispatch(dispatchKeySet, self, input2);
+    }
+
+    // aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & orgqr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & input2) {
+        return at::_ops::orgqr_out::redispatch(dispatchKeySet, self, input2, out);
+    }
+
+    // aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & orgqr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
+        return at::_ops::orgqr_out::redispatch(dispatchKeySet, self, input2, out);
+    }
+
+    // aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & ormqr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) {
+        return at::_ops::ormqr_out::redispatch(dispatchKeySet, self, input2, input3, left, transpose, out);
+    }
+
+    // aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & ormqr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
+        return at::_ops::ormqr_out::redispatch(dispatchKeySet, self, input2, input3, left, transpose, out);
+    }
+
+    // aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
+    inline at::Tensor ormqr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) {
+        return at::_ops::ormqr::redispatch(dispatchKeySet, self, input2, input3, left, transpose);
+    }
+
+    // aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool pivot=true, bool check_errors=true) {
+        return at::_ops::_lu_with_info::redispatch(dispatchKeySet, self, pivot, check_errors);
+    }
+
+    // aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lu_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
+        return at::_ops::lu_solve_out::redispatch(dispatchKeySet, self, LU_data, LU_pivots, out);
+    }
+
+    // aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lu_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
+        return at::_ops::lu_solve_out::redispatch(dispatchKeySet, self, LU_data, LU_pivots, out);
+    }
+
+    // aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
+    inline at::Tensor lu_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
+        return at::_ops::lu_solve::redispatch(dispatchKeySet, self, LU_data, LU_pivots);
+    }
+
+    // aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) {
+        return at::_ops::lu_unpack::redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots);
+    }
+
+    // aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) {
+        return at::_ops::lu_unpack_out::redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
+    }
+
+    // aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
+        return at::_ops::lu_unpack_out::redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
+    }
+
+    // aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & multinomial_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t num_samples, bool replacement=false, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::multinomial_out::redispatch(dispatchKeySet, self, num_samples, replacement, generator, out);
+    }
+
+    // aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & multinomial_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::multinomial_out::redispatch(dispatchKeySet, self, num_samples, replacement, generator, out);
+    }
+
+    // aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
+    inline at::Tensor multinomial(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement=false, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::multinomial::redispatch(dispatchKeySet, self, num_samples, replacement, generator);
+    }
+
+    // aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lgamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::lgamma_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lgamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::lgamma_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & lgamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::lgamma_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::lgamma(Tensor self) -> Tensor
+    inline at::Tensor lgamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::lgamma::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & digamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::digamma_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & digamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::digamma_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::digamma(Tensor self) -> Tensor
+    inline at::Tensor digamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::digamma::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & polygamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, const at::Tensor & self) { + return at::_ops::polygamma_out::redispatch(dispatchKeySet, n, self, out); + } + + // aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & polygamma_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) { + return at::_ops::polygamma_out::redispatch(dispatchKeySet, n, self, out); + } + + // aten::polygamma(int n, Tensor self) -> Tensor + inline at::Tensor polygamma(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self) { + return at::_ops::polygamma::redispatch(dispatchKeySet, n, self); + } + + // aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!) + inline at::Tensor & polygamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n) { + return at::_ops::polygamma_::redispatch(dispatchKeySet, self, n); + } + + // aten::erfinv(Tensor self) -> Tensor + inline at::Tensor erfinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::erfinv::redispatch(dispatchKeySet, self); + } + + // aten::erfinv_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & erfinv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::erfinv_::redispatch(dispatchKeySet, self); + } + + // aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erfinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::erfinv_out::redispatch(dispatchKeySet, self, out); + } + + // aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erfinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::erfinv_out::redispatch(dispatchKeySet, self, out); + } + + // aten::i0(Tensor self) -> Tensor + inline at::Tensor i0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::i0::redispatch(dispatchKeySet, self); + } + + // aten::i0_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & i0_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::i0_::redispatch(dispatchKeySet, self); + } + + // aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & i0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & i0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sign(Tensor self) -> Tensor + inline at::Tensor sign(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sign::redispatch(dispatchKeySet, self); + } + + // aten::sign_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sign_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sign_::redispatch(dispatchKeySet, self); + } + + // aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sign_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sign_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & sign_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sign_out::redispatch(dispatchKeySet, self, out); + } + + // aten::signbit(Tensor self) -> Tensor + inline at::Tensor signbit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::signbit::redispatch(dispatchKeySet, self); + } + + // aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & signbit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::signbit_out::redispatch(dispatchKeySet, self, out); + } + + // aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & signbit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::signbit_out::redispatch(dispatchKeySet, self, out); + } + + // aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor + inline at::Tensor dist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p=2) { + return at::_ops::dist::redispatch(dispatchKeySet, self, other, p); + } + + // aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atan2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::atan2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atan2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::atan2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & atan2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::atan2_::redispatch(dispatchKeySet, self, other); + } + + // aten::atan2(Tensor self, Tensor other) -> Tensor + inline at::Tensor atan2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::atan2::redispatch(dispatchKeySet, self, other); + } + + // aten::arctan2(Tensor self, Tensor other) -> Tensor + inline at::Tensor arctan2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::arctan2::redispatch(dispatchKeySet, self, other); + } + + // aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arctan2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::arctan2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arctan2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::arctan2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & arctan2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::arctan2_::redispatch(dispatchKeySet, self, other); + } + + // aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) 
+    inline at::Tensor & lerp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
+        return at::_ops::lerp_Scalar_out::redispatch(dispatchKeySet, self, end, weight, out);
+    }
+
+    // aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lerp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
+        return at::_ops::lerp_Scalar_out::redispatch(dispatchKeySet, self, end, weight, out);
+    }
+
+    // aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lerp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
+        return at::_ops::lerp_Tensor_out::redispatch(dispatchKeySet, self, end, weight, out);
+    }
+
+    // aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lerp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
+        return at::_ops::lerp_Tensor_out::redispatch(dispatchKeySet, self, end, weight, out);
+    }
+
+    // aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
+    inline at::Tensor lerp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
+        return at::_ops::lerp_Scalar::redispatch(dispatchKeySet, self, end, weight);
+    }
+
+    // aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
+    inline at::Tensor lerp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
+        return at::_ops::lerp_Tensor::redispatch(dispatchKeySet, self, end, weight);
+    }
+
+    // aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & histc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) {
+        return at::_ops::histc_out::redispatch(dispatchKeySet, self, bins, min, max, out);
+    }
+
+    // aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & histc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
+        return at::_ops::histc_out::redispatch(dispatchKeySet, self, bins, min, max, out);
+    }
+
+    // aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
+    inline at::Tensor histc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) {
+        return at::_ops::histc::redispatch(dispatchKeySet, self, bins, min, max);
+    }
+
+    // aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::histogram_bins_tensor_out::redispatch(dispatchKeySet, self, bins, weight, density, hist, bin_edges);
+    }
+
+    // aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
+        return at::_ops::histogram_bins_tensor_out::redispatch(dispatchKeySet, self, bins, weight, density, hist, bin_edges);
+    }
+
+    // aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
+    inline ::std::tuple<at::Tensor,at::Tensor> histogram(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::histogram_bins_tensor::redispatch(dispatchKeySet, self, bins, weight, density);
+    }
+
+    // aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, c10::optional<c10::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::histogram_bin_ct_out::redispatch(dispatchKeySet, self, bins, range, weight, density, hist, bin_edges);
+    }
+
+    // aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, c10::optional<c10::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
+        return at::_ops::histogram_bin_ct_out::redispatch(dispatchKeySet, self, bins, range, weight, density, hist, bin_edges);
+    }
+
+    // aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
+    inline ::std::tuple<at::Tensor,at::Tensor> histogram(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins=100, c10::optional<c10::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::histogram_bin_ct::redispatch(dispatchKeySet, self, bins, range, weight, density);
+    }
+
+    // aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
+    inline ::std::vector<at::Tensor> _histogramdd_bin_edges(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<c10::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::_histogramdd_bin_edges::redispatch(dispatchKeySet, self, bins, range, weight, density);
+    }
+
+    // aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
+    inline at::Tensor _histogramdd_from_bin_cts(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<c10::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::_histogramdd_from_bin_cts::redispatch(dispatchKeySet, self, bins, range, weight, density);
+    }
+
+    // aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
+    inline at::Tensor _histogramdd_from_bin_tensors(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::_histogramdd_from_bin_tensors::redispatch(dispatchKeySet, self, bins, weight, density);
+    }
+
+    // aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
+    inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<c10::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::histogramdd::redispatch(dispatchKeySet, self, bins, range, weight, density);
+    }
+
+    // aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
+    inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, c10::optional<c10::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::histogramdd_int_bins::redispatch(dispatchKeySet, self, bins, range, weight, density);
+    }
+
+    // aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
+    inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, c10::optional<c10::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::histogramdd_TensorList_bins::redispatch(dispatchKeySet, self, bins, range, weight, density);
+    }
+
+    // aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fmod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::fmod_Scalar_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fmod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+        return at::_ops::fmod_Scalar_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
+    inline at::Tensor fmod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::fmod_Scalar::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+    inline at::Tensor & fmod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::fmod__Scalar::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fmod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmod_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::fmod_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor fmod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmod_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & fmod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmod__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hypot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hypot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::hypot_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::hypot(Tensor self, Tensor other) -> Tensor + inline at::Tensor hypot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot::redispatch(dispatchKeySet, self, other); + } + + // aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & hypot_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot_::redispatch(dispatchKeySet, self, other); + } + + // aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & igamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & igamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::igamma_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::igamma(Tensor self, Tensor other) -> Tensor + inline at::Tensor igamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma::redispatch(dispatchKeySet, self, other); + } + + // aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & igamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma_::redispatch(dispatchKeySet, self, other); + } + + // aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & igammac_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & igammac_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::igammac_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::igammac(Tensor self, Tensor other) -> Tensor + inline at::Tensor igammac(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac::redispatch(dispatchKeySet, self, other); + } + + // aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & igammac_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac_::redispatch(dispatchKeySet, self, other); + } + + // aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nextafter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::nextafter_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nextafter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::nextafter_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::nextafter(Tensor self, Tensor other) -> Tensor + inline at::Tensor nextafter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::nextafter::redispatch(dispatchKeySet, self, other); + } + + // aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & nextafter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::nextafter_::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & remainder_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::remainder_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & remainder_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::remainder_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor remainder(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::remainder_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & remainder_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::remainder__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & remainder_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::remainder_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & remainder_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::remainder_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor remainder(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::remainder_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & remainder_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::remainder__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor remainder(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::remainder_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::min(Tensor self) -> Tensor + inline at::Tensor min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::min::redispatch(dispatchKeySet, self); + } + + // aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::min_unary_out::redispatch(dispatchKeySet, self, out); + } + + // aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::min_unary_out::redispatch(dispatchKeySet, self, out); + } + + // aten::fmin(Tensor self, Tensor other) -> Tensor + inline at::Tensor fmin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmin::redispatch(dispatchKeySet, self, other); + } + + // aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmin_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::fmin_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::max(Tensor self) -> Tensor + inline at::Tensor max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::max::redispatch(dispatchKeySet, self); + } + + // aten::fmax(Tensor self, Tensor other) -> Tensor + inline at::Tensor fmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmax::redispatch(dispatchKeySet, self, other); + } + + // aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & fmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmax_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::fmax_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::maximum(Tensor self, Tensor other) -> Tensor + inline at::Tensor maximum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::maximum::redispatch(dispatchKeySet, self, other); + } + + // aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & maximum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::maximum_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & maximum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::maximum_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::max.other(Tensor self, Tensor other) -> Tensor + inline at::Tensor max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::max_other::redispatch(dispatchKeySet, self, other); + } + + // aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::max_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::max_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::max_unary_out::redispatch(dispatchKeySet, self, out); + } + + // aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::max_unary_out::redispatch(dispatchKeySet, self, out); + } + + // aten::minimum(Tensor self, Tensor other) -> Tensor + inline at::Tensor minimum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::minimum::redispatch(dispatchKeySet, self, other); + } + + // aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & minimum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::minimum_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & minimum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::minimum_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::min_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::min_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::min.other(Tensor self, Tensor other) -> Tensor + inline at::Tensor min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::min_other::redispatch(dispatchKeySet, self, other); + } + + // aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + inline at::Tensor quantile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation); + } + + // aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantile_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantile_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::quantile_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + inline at::Tensor quantile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_scalar::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation); + } + + // aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantile_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_scalar_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & quantile_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::quantile_scalar_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + inline at::Tensor nanquantile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::nanquantile::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation); + } + + // aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanquantile_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::nanquantile_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanquantile_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::nanquantile_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + inline at::Tensor nanquantile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::nanquantile_scalar::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation); + } + + // aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanquantile_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::nanquantile_scalar_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanquantile_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::nanquantile_scalar_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim=-1, bool descending=false) { + return at::_ops::sort_values::redispatch(dispatchKeySet, self, dim, descending, values, indices); + } + + // aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) { + return at::_ops::sort_values::redispatch(dispatchKeySet, self, dim, descending, values, indices); + } + + // aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false) { + return at::_ops::sort_values_stable::redispatch(dispatchKeySet, self, stable, dim, descending, values, indices); + } + + // aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) { + return at::_ops::sort_values_stable::redispatch(dispatchKeySet, self, stable, dim, descending, values, indices); + } + + // aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> sort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=-1, bool descending=false) { + return at::_ops::sort::redispatch(dispatchKeySet, self, dim, descending); + } + + // aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> sort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false) { + return at::_ops::sort_stable::redispatch(dispatchKeySet, self, stable, dim, descending); + } + + // aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool descending=false) { + return at::_ops::sort_dimname_values::redispatch(dispatchKeySet, self, dim, descending, values, indices); + } + + // aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) { + return at::_ops::sort_dimname_values::redispatch(dispatchKeySet, self, dim, descending, values, indices); + } + + // aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!)
indices) + inline ::std::tuple sort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional stable, at::Dimname dim, bool descending=false) { + return at::_ops::sort_dimname_values_stable::redispatch(dispatchKeySet, self, stable, dim, descending, values, indices); + } + + // aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple sort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) { + return at::_ops::sort_dimname_values_stable::redispatch(dispatchKeySet, self, stable, dim, descending, values, indices); + } + + // aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + inline ::std::tuple sort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending=false) { + return at::_ops::sort_dimname::redispatch(dispatchKeySet, self, dim, descending); + } + + // aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + inline ::std::tuple sort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional stable, at::Dimname dim, bool descending=false) { + return at::_ops::sort_dimname_stable::redispatch(dispatchKeySet, self, stable, dim, descending); + } + + // aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & msort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::msort_out::redispatch(dispatchKeySet, self, out); + } + + // aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & msort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::msort_out::redispatch(dispatchKeySet, self, out); + } + + // aten::msort(Tensor self) -> Tensor + inline at::Tensor msort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::msort::redispatch(dispatchKeySet, self); + } + + // aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor + inline at::Tensor argsort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=-1, bool descending=false) { + return at::_ops::argsort::redispatch(dispatchKeySet, self, dim, descending); + } + + // aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor + inline at::Tensor argsort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false) { + return at::_ops::argsort_stable::redispatch(dispatchKeySet, self, stable, dim, descending); + } + + // aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor + inline at::Tensor argsort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending=false) { + return at::_ops::argsort_dimname::redispatch(dispatchKeySet, self, dim, descending); + } + + // aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> topk_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) { + return at::_ops::topk_values::redispatch(dispatchKeySet, self, k, dim, largest, sorted, values, indices); + } + + // aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> topk_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) { + return at::_ops::topk_values::redispatch(dispatchKeySet, self, k, dim, largest, sorted, values, indices); + } + + // aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> topk_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool largest=true, bool sorted=true) { + return at::_ops::topk_values::redispatch(dispatchKeySet, self, k, dim, largest, sorted, values, indices); + } + + // aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> topk_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) { + return at::_ops::topk_values::redispatch(dispatchKeySet, self, k, dim, largest, sorted, values, indices); + } + + // aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> topk(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) { + return at::_ops::topk::redispatch(dispatchKeySet, self, k, dim, largest, sorted); + } + + // aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> topk_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool largest=true, bool sorted=true) { + return at::_ops::topk::redispatch(dispatchKeySet, self, k, dim, largest, sorted); + } + + // aten::all(Tensor self) -> Tensor + inline at::Tensor all(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::all::redispatch(dispatchKeySet, self); + } + + // aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::all_all_out::redispatch(dispatchKeySet, self, out); + } + + // aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::all_all_out::redispatch(dispatchKeySet, self, out); + } + + // aten::any(Tensor self) -> Tensor + inline at::Tensor any(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::any::redispatch(dispatchKeySet, self); + } + + // aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & any_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::any_all_out::redispatch(dispatchKeySet, self, out); + } + + // aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & any_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::any_all_out::redispatch(dispatchKeySet, self, out); + } + + // aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & renorm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { + return at::_ops::renorm_out::redispatch(dispatchKeySet, self, p, dim, maxnorm, out); + } + + // aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & renorm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) { + return at::_ops::renorm_out::redispatch(dispatchKeySet, self, p, dim, maxnorm, out); + } + + // aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor + inline at::Tensor renorm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { + return at::_ops::renorm::redispatch(dispatchKeySet, self, p, dim, maxnorm); + } + + // aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) + inline at::Tensor & renorm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { + return at::_ops::renorm_::redispatch(dispatchKeySet, self, p, dim, maxnorm); + } + + // aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) + inline at::Tensor unfold(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + return at::_ops::unfold::redispatch(dispatchKeySet, self, dimension, size, step); + } + + // aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor + inline at::Tensor unfold_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward::redispatch(dispatchKeySet, grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step); + } + + // aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor + inline at::Tensor unfold_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward::redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step); + } + + // aten::equal(Tensor self, Tensor other) -> bool + inline bool equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::equal::redispatch(dispatchKeySet, self, other); + } + + // aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & pow_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Tensor_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + inline at::Tensor pow(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor + inline at::Tensor pow(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + inline at::Tensor pow(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + inline at::Tensor & pow_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow__Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + inline at::Tensor & pow_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow__Tensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & float_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::float_power_Tensor_Tensor_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::float_power_Tensor_Tensor_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + inline at::Tensor float_power(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::float_power_Tensor_Tensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::float_power_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::float_power_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor + inline at::Tensor float_power(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::float_power_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::float_power_Tensor_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { + return at::_ops::float_power_Tensor_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + inline at::Tensor float_power(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::float_power_Tensor_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + inline at::Tensor & float_power_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::float_power__Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + inline at::Tensor & float_power_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::float_power__Tensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? 
generator=None) -> Tensor(a!) + inline at::Tensor & normal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_::redispatch(dispatchKeySet, self, mean, std, generator); + } + + // aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor + inline at::Tensor normal_functional(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_functional::redispatch(dispatchKeySet, self, mean, std, generator); + } + + // aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & mean, double std=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_Tensor_float_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_Tensor_float_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_Tensor_float::redispatch(dispatchKeySet, mean, std, generator); + } + + // aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, double mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_float_Tensor_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_float_Tensor_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_float_Tensor::redispatch(dispatchKeySet, mean, std, generator); + } + + // aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_Tensor_Tensor_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_Tensor_Tensor_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_Tensor_Tensor::redispatch(dispatchKeySet, mean, std, generator); + } + + // aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}) { + return at::_ops::normal_float_float::redispatch(dispatchKeySet, mean, std, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::normal_float_float::redispatch(dispatchKeySet, mean, std, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } + + // aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor normal_symint(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}) { + return at::_ops::normal_float_float::redispatch(dispatchKeySet, mean, std, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor normal_symint(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::normal_float_float::redispatch(dispatchKeySet, mean, std, size, generator, dtype, layout, device, pin_memory); + } + + // aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_float_float_out::redispatch(dispatchKeySet, mean, std, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator?
generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, double mean, double std, at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::normal_float_float_out::redispatch(dispatchKeySet, mean, std, c10::fromIntArrayRefSlow(size), generator, out); + } + + // aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, double mean, double std, c10::SymIntArrayRef size, c10::optional generator=c10::nullopt) { + return at::_ops::normal_float_float_out::redispatch(dispatchKeySet, mean, std, size, generator, out); + } + + // aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_symint_outf(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::normal_float_float_out::redispatch(dispatchKeySet, mean, std, size, generator, out); + } + + // aten::alias(Tensor(a) self) -> Tensor(a) + inline at::Tensor alias(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::alias::redispatch(dispatchKeySet, self); + } + + // aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> () + inline void _amp_foreach_non_finite_check_and_unscale_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale_::redispatch(dispatchKeySet, self, found_inf, inv_scale); + } + + // aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!) 
+ inline at::Tensor & _amp_update_scale_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { + return at::_ops::_amp_update_scale_::redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); + } + + // aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_add(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_add_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_add(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_List::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + inline void _foreach_add_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add__List::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_add(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_add_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_add_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_add__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_add(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> () + inline void _foreach_add_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add__Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_sub(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_sub_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_sub_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_sub__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_sub(c10::DispatchKeySet
dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_sub_List::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + inline void _foreach_sub_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_sub__List::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_sub(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_sub_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_sub_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_sub__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector _foreach_mul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_mul_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_mul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_mul__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[] + inline ::std::vector _foreach_mul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_mul_List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () + inline void _foreach_mul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_mul__List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_mul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_mul_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_mul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_mul__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[] + inline ::std::vector _foreach_mul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) { + return at::_ops::_foreach_mul_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> () + inline void _foreach_mul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) { + return at::_ops::_foreach_mul__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector _foreach_div(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return 
at::_ops::_foreach_div_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_div_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_div__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[] + inline ::std::vector _foreach_div(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_div_List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () + inline void _foreach_div_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_div__List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_div(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_div_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_div_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_div__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[] + inline ::std::vector _foreach_div(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) { + return at::_ops::_foreach_div_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> () + inline void _foreach_div_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) { + return at::_ops::_foreach_div__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector _foreach_clamp_max(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_max_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_clamp_max_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_max__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[] + inline ::std::vector _foreach_clamp_max(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_clamp_max_List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> () + inline void _foreach_clamp_max_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_clamp_max__List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_clamp_max(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_max_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] 
self, Scalar[] scalars) -> () + inline void _foreach_clamp_max_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_max__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector _foreach_clamp_min(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_min_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_clamp_min_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_min__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[] + inline ::std::vector _foreach_clamp_min(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_clamp_min_List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> () + inline void _foreach_clamp_min_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_clamp_min__List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_clamp_min(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_min_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_clamp_min_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_min__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector _foreach_maximum(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_maximum_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_maximum_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_maximum__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[] + inline ::std::vector _foreach_maximum(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_maximum_List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> () + inline void _foreach_maximum_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_maximum__List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_maximum(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_maximum_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () 
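// ---------------------------------------------------------------------------
// Illustrative sketch (not one of the generated declarations): the _foreach_*
// overloads above mirror the public at::_foreach_* wrappers, which take whole
// tensor lists and launch one fused kernel per list instead of looping tensor
// by tensor. `grads`, `scale` and `max_val` are placeholder names.

#include <ATen/ATen.h>
#include <vector>

void scale_and_clip(std::vector<at::Tensor>& grads, double scale, double max_val) {
  // In place: grads[i] *= scale for every tensor in the list.
  at::_foreach_mul_(grads, scale);
  // In place: grads[i] = min(grads[i], max_val), element-wise.
  at::_foreach_clamp_max_(grads, max_val);
}

// The redispatch variants declared in this header take the same arguments plus
// an explicit c10::DispatchKeySet as the leading parameter.
// ---------------------------------------------------------------------------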
+ inline void _foreach_maximum_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_maximum__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector _foreach_minimum(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_minimum_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_minimum_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_minimum__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[] + inline ::std::vector _foreach_minimum(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_minimum_List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> () + inline void _foreach_minimum_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_minimum__List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_minimum(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_minimum_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_minimum_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_minimum__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] + inline ::std::vector _foreach_addcdiv(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcdiv_Scalar::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_addcdiv(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcdiv_ScalarList::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars); + } + + // aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[] + inline ::std::vector _foreach_addcdiv(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcdiv_Tensor::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars); + } + + // aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + inline void _foreach_addcdiv_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcdiv__Scalar::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // 
aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () + inline void _foreach_addcdiv_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcdiv__ScalarList::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars); + } + + // aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> () + inline void _foreach_addcdiv_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcdiv__Tensor::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars); + } + + // aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] + inline ::std::vector _foreach_addcmul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcmul_Scalar::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] + inline ::std::vector _foreach_addcmul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcmul_ScalarList::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars); + } + + // aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[] + inline ::std::vector _foreach_addcmul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcmul_Tensor::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars); + } + + // aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + inline void _foreach_addcmul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcmul__Scalar::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () + inline void _foreach_addcmul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcmul__ScalarList::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars); + } + + // aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> () + inline void _foreach_addcmul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcmul__Tensor::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars); + } + + // aten::_foreach_abs(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_abs(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_abs::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_abs_(Tensor(a!)[] self) -> () + inline void _foreach_abs_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return 
at::_ops::_foreach_abs_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_acos(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_acos(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_acos::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_acos_(Tensor(a!)[] self) -> () + inline void _foreach_acos_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_acos_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_asin(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_asin(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_asin::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_asin_(Tensor(a!)[] self) -> () + inline void _foreach_asin_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_asin_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_atan(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_atan(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_atan::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_atan_(Tensor(a!)[] self) -> () + inline void _foreach_atan_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_atan_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_ceil(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_ceil(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_ceil::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_ceil_(Tensor(a!)[] self) -> () + inline void _foreach_ceil_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_ceil_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_cos(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_cos(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_cos::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_cos_(Tensor(a!)[] self) -> () + inline void _foreach_cos_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_cos_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_cosh(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_cosh(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_cosh::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_cosh_(Tensor(a!)[] self) -> () + inline void _foreach_cosh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_cosh_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_erf(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_erf(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_erf::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_erf_(Tensor(a!)[] self) -> () + inline void _foreach_erf_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_erf_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_erfc(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_erfc(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_erfc::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_erfc_(Tensor(a!)[] self) -> () + inline void _foreach_erfc_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_erfc_::redispatch(dispatchKeySet, self); + } + + // 
aten::_foreach_exp(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_exp(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_exp::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_exp_(Tensor(a!)[] self) -> () + inline void _foreach_exp_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_exp_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_expm1(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_expm1(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_expm1::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_expm1_(Tensor(a!)[] self) -> () + inline void _foreach_expm1_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_expm1_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_floor(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_floor(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_floor::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_floor_(Tensor(a!)[] self) -> () + inline void _foreach_floor_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_floor_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_frac(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_frac(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_frac::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_frac_(Tensor(a!)[] self) -> () + inline void _foreach_frac_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_frac_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[] + inline ::std::vector _foreach_lerp(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights) { + return at::_ops::_foreach_lerp_List::redispatch(dispatchKeySet, self, tensors1, weights); + } + + // aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> () + inline void _foreach_lerp_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights) { + return at::_ops::_foreach_lerp__List::redispatch(dispatchKeySet, self, tensors1, weights); + } + + // aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[] + inline ::std::vector _foreach_lerp(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) { + return at::_ops::_foreach_lerp_Scalar::redispatch(dispatchKeySet, self, tensors1, weight); + } + + // aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> () + inline void _foreach_lerp_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) { + return at::_ops::_foreach_lerp__Scalar::redispatch(dispatchKeySet, self, tensors1, weight); + } + + // aten::_foreach_lgamma(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_lgamma(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_lgamma::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_lgamma_(Tensor(a!)[] self) -> () + inline void _foreach_lgamma_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_lgamma_::redispatch(dispatchKeySet, self); + } + + // 
aten::_foreach_log(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_log(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log_(Tensor(a!)[] self) -> () + inline void _foreach_log_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log10(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_log10(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log10::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log10_(Tensor(a!)[] self) -> () + inline void _foreach_log10_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log10_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log1p(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_log1p(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log1p::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log1p_(Tensor(a!)[] self) -> () + inline void _foreach_log1p_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log1p_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log2(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_log2(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log2::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log2_(Tensor(a!)[] self) -> () + inline void _foreach_log2_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log2_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_neg(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_neg(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_neg::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_neg_(Tensor(a!)[] self) -> () + inline void _foreach_neg_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_neg_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[] + inline ::std::vector _foreach_norm(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord=2) { + return at::_ops::_foreach_norm_Scalar::redispatch(dispatchKeySet, self, ord); + } + + // aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[] + inline ::std::vector _foreach_pow(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent) { + return at::_ops::_foreach_pow_List::redispatch(dispatchKeySet, self, exponent); + } + + // aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[] + inline ::std::vector _foreach_pow(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent) { + return at::_ops::_foreach_pow_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[] + inline ::std::vector _foreach_pow(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef exponent) { + return at::_ops::_foreach_pow_ScalarList::redispatch(dispatchKeySet, self, exponent); + } + + // aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[] + inline ::std::vector _foreach_pow(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, at::TensorList exponent) { + return 
at::_ops::_foreach_pow_ScalarAndTensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> () + inline void _foreach_pow_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent) { + return at::_ops::_foreach_pow__List::redispatch(dispatchKeySet, self, exponent); + } + + // aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> () + inline void _foreach_pow_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent) { + return at::_ops::_foreach_pow__Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> () + inline void _foreach_pow_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef exponent) { + return at::_ops::_foreach_pow__ScalarList::redispatch(dispatchKeySet, self, exponent); + } + + // aten::_foreach_reciprocal(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_reciprocal(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_reciprocal::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_reciprocal_(Tensor(a!)[] self) -> () + inline void _foreach_reciprocal_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_reciprocal_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_round(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_round(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_round::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_round_(Tensor(a!)[] self) -> () + inline void _foreach_round_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_round_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sigmoid(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_sigmoid(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sigmoid::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sigmoid_(Tensor(a!)[] self) -> () + inline void _foreach_sigmoid_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sigmoid_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sign(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_sign(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sign::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sign_(Tensor(a!)[] self) -> () + inline void _foreach_sign_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sign_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sin(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_sin(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sin::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sin_(Tensor(a!)[] self) -> () + inline void _foreach_sin_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sin_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sinh(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_sinh(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sinh::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sinh_(Tensor(a!)[] self) -> () + inline void _foreach_sinh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return 
at::_ops::_foreach_sinh_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sqrt(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_sqrt(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sqrt::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sqrt_(Tensor(a!)[] self) -> () + inline void _foreach_sqrt_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sqrt_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_tan(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_tan(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_tan::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_tan_(Tensor(a!)[] self) -> () + inline void _foreach_tan_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_tan_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_tanh(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_tanh(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_tanh::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_tanh_(Tensor(a!)[] self) -> () + inline void _foreach_tanh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_tanh_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_trunc(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_trunc(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_trunc::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_trunc_(Tensor(a!)[] self) -> () + inline void _foreach_trunc_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_trunc_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_zero_(Tensor(a!)[] self) -> () + inline void _foreach_zero_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_zero_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> () + inline void _foreach_copy_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking=false) { + return at::_ops::_foreach_copy_::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + inline at::Tensor bucketize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) { + return at::_ops::bucketize_Tensor::redispatch(dispatchKeySet, self, boundaries, out_int32, right); + } + + // aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bucketize_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) { + return at::_ops::bucketize_Tensor_out::redispatch(dispatchKeySet, self, boundaries, out_int32, right, out); + } + + // aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) 
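// ---------------------------------------------------------------------------
// Illustrative sketch (not one of the generated declarations): the unary and
// pointwise _foreach_* ops above compose into fused, optimizer-style updates.
// A rough Adam-like denominator step, assuming placeholder names `params`,
// `exp_avg`, `exp_avg_sq`, `step_size` and `eps`:

#include <ATen/ATen.h>
#include <vector>

void adam_denominator_step(std::vector<at::Tensor>& params,
                           const std::vector<at::Tensor>& exp_avg,
                           const std::vector<at::Tensor>& exp_avg_sq,
                           double step_size, double eps) {
  // denom[i] = sqrt(exp_avg_sq[i]) + eps, computed out of place for the list.
  std::vector<at::Tensor> denom = at::_foreach_sqrt(exp_avg_sq);
  at::_foreach_add_(denom, eps);
  // params[i] -= step_size * exp_avg[i] / denom[i], fused across the list.
  at::_foreach_addcdiv_(params, exp_avg, denom, -step_size);
}
// ---------------------------------------------------------------------------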
+    inline at::Tensor & bucketize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
+        return at::_ops::bucketize_Tensor_out::redispatch(dispatchKeySet, self, boundaries, out_int32, right, out);
+    }
+
+    // aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
+    inline at::Tensor bucketize(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) {
+        return at::_ops::bucketize_Scalar::redispatch(dispatchKeySet, self, boundaries, out_int32, right);
+    }
+
+    // aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+    inline at::Tensor searchsorted(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+        return at::_ops::searchsorted_Tensor::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter);
+    }
+
+    // aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & searchsorted_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+        return at::_ops::searchsorted_Tensor_out::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out);
+    }
+
+    // aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & searchsorted_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
+        return at::_ops::searchsorted_Tensor_out::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out);
+    }
+
+    // aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+    inline at::Tensor searchsorted(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+        return at::_ops::searchsorted_Scalar::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter);
+    }
+
+    // aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
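// ---------------------------------------------------------------------------
// Illustrative sketch (not one of the generated declarations): bucketize and
// searchsorted both binary-search a sorted tensor; bucketize takes the
// boundaries second, searchsorted takes the sorted sequence first. With the
// default right=false, the returned index i satisfies
// boundaries[i-1] < v <= boundaries[i].

#include <ATen/ATen.h>

at::Tensor bucket_indices() {
  at::Tensor boundaries = at::tensor({1.0, 3.0, 5.0});
  at::Tensor values = at::tensor({0.5, 3.0, 6.0});
  at::Tensor idx = at::bucketize(values, boundaries);   // expected {0, 1, 3}
  // For a 1-D boundaries tensor this yields the same indices:
  at::Tensor idx2 = at::searchsorted(boundaries, values);
  return at::stack({idx, idx2});
}
// ---------------------------------------------------------------------------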
+ inline at::Tensor & searchsorted_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional side=c10::nullopt, const c10::optional & sorter={}) { + return at::_ops::searchsorted_Scalar_out::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out); + } + + // aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & searchsorted_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, at::Tensor & out) { + return at::_ops::searchsorted_Scalar_out::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out); + } + + // aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor + inline at::Tensor _convert_indices_from_coo_to_csr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32=false) { + return at::_ops::_convert_indices_from_coo_to_csr::redispatch(dispatchKeySet, self, size, out_int32); + } + + // aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _convert_indices_from_coo_to_csr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t size, bool out_int32=false) { + return at::_ops::_convert_indices_from_coo_to_csr_out::redispatch(dispatchKeySet, self, size, out_int32, out); + } + + // aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _convert_indices_from_coo_to_csr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) { + return at::_ops::_convert_indices_from_coo_to_csr_out::redispatch(dispatchKeySet, self, size, out_int32, out); + } + + // aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor + inline at::Tensor _convert_indices_from_csr_to_coo(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false) { + return at::_ops::_convert_indices_from_csr_to_coo::redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose); + } + + // aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _convert_indices_from_csr_to_coo_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false) { + return at::_ops::_convert_indices_from_csr_to_coo_out::redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose, out); + } + + // aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!) 
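// ---------------------------------------------------------------------------
// Illustrative sketch (not one of the generated declarations): the internal
// _convert_indices_from_coo_to_csr helper compresses a sorted 1-D COO row
// index vector into CSR row pointers (crow_indices) of length size + 1.
// `row_indices` and `num_rows` are placeholder names.

#include <ATen/ATen.h>

at::Tensor rows_to_crow(const at::Tensor& row_indices, int64_t num_rows) {
  // e.g. rows {0, 0, 2} with num_rows = 3  ->  crow {0, 2, 2, 3}
  return at::_convert_indices_from_coo_to_csr(row_indices, num_rows, /*out_int32=*/false);
}
// ---------------------------------------------------------------------------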
+ inline at::Tensor & _convert_indices_from_csr_to_coo_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) { + return at::_ops::_convert_indices_from_csr_to_coo_out::redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose, out); + } + + // aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mse_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::mse_loss_out::redispatch(dispatchKeySet, self, target, reduction, out); + } + + // aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mse_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) { + return at::_ops::mse_loss_out::redispatch(dispatchKeySet, self, target, reduction, out); + } + + // aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + inline at::Tensor mse_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::mse_loss::redispatch(dispatchKeySet, self, target, reduction); + } + + // aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & mse_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::mse_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input); + } + + // aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & mse_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) { + return at::_ops::mse_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input); + } + + // aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + inline at::Tensor mse_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::mse_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction); + } + + // aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + inline at::Tensor l1_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::l1_loss::redispatch(dispatchKeySet, self, target, reduction); + } + + // aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) 
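// ---------------------------------------------------------------------------
// Illustrative sketch (not one of the generated declarations): every schema
// with a Tensor(a!) out argument gets an `_out` / `_outf` pair. `_out` takes
// the destination first and keeps the schema defaults; `_outf` keeps the
// schema argument order and takes the destination last, with no defaults.
// Shown with the public mse_loss wrappers; `pred`, `target` and `out` are
// placeholder names.

#include <ATen/ATen.h>

void mse_into(const at::Tensor& pred, const at::Tensor& target, at::Tensor& out) {
  at::mse_loss_out(out, pred, target);                        // reduction defaults to Mean
  at::mse_loss_outf(pred, target, at::Reduction::Mean, out);  // same computation, outf spelling
}
// ---------------------------------------------------------------------------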
+    inline at::Tensor & multi_margin_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::multi_margin_loss_out::redispatch(dispatchKeySet, self, target, p, margin, weight, reduction, out);
+    }
+
+    // aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & multi_margin_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
+        return at::_ops::multi_margin_loss_out::redispatch(dispatchKeySet, self, target, p, margin, weight, reduction, out);
+    }
+
+    // aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
+    inline at::Tensor multi_margin_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::multi_margin_loss::redispatch(dispatchKeySet, self, target, p, margin, weight, reduction);
+    }
+
+    // aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & multi_margin_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::multi_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction, grad_input);
+    }
+
+    // aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & multi_margin_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
+        return at::_ops::multi_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction, grad_input);
+    }
+
+    // aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
+    inline at::Tensor multi_margin_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::multi_margin_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction);
+    }
+
+    // aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & multilabel_margin_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::multilabel_margin_loss_out::redispatch(dispatchKeySet, self, target, reduction, out); + } + + // aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & multilabel_margin_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) { + return at::_ops::multilabel_margin_loss_out::redispatch(dispatchKeySet, self, target, reduction, out); + } + + // aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + inline at::Tensor multilabel_margin_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::multilabel_margin_loss::redispatch(dispatchKeySet, self, target, reduction); + } + + // aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple multilabel_margin_loss_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::multilabel_margin_loss_forward_output::redispatch(dispatchKeySet, self, target, reduction, output, is_target); + } + + // aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple multilabel_margin_loss_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) { + return at::_ops::multilabel_margin_loss_forward_output::redispatch(dispatchKeySet, self, target, reduction, output, is_target); + } + + // aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) + inline ::std::tuple multilabel_margin_loss_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::multilabel_margin_loss_forward::redispatch(dispatchKeySet, self, target, reduction); + } + + // aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & multilabel_margin_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) { + return at::_ops::multilabel_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target, grad_input); + } + + // aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) 
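// ---------------------------------------------------------------------------
// Illustrative sketch (not one of the generated declarations): the *_forward /
// *_backward pairs expose the extra buffers the backward pass needs.
// multilabel_margin_loss_forward also returns `is_target`, which is passed
// back into multilabel_margin_loss_backward. `input` and `target` (int64
// label indices in the multilabel format) are placeholder names.

#include <ATen/ATen.h>

at::Tensor multilabel_margin_grad(const at::Tensor& input, const at::Tensor& target) {
  auto [output, is_target] =
      at::multilabel_margin_loss_forward(input, target, at::Reduction::Mean);
  at::Tensor grad_output = at::ones_like(output);
  return at::multilabel_margin_loss_backward(grad_output, input, target,
                                             at::Reduction::Mean, is_target);
}
// ---------------------------------------------------------------------------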
+ inline at::Tensor & multilabel_margin_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) { + return at::_ops::multilabel_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target, grad_input); + } + + // aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor + inline at::Tensor multilabel_margin_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) { + return at::_ops::multilabel_margin_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target); + } + + // aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nll_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) { + return at::_ops::nll_loss_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out); + } + + // aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nll_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) { + return at::_ops::nll_loss_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out); + } + + // aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nll_loss_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) { + return at::_ops::nll_loss_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out); + } + + // aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nll_loss_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) { + return at::_ops::nll_loss_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out); + } + + // aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + inline at::Tensor nll_loss_nd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) { + return at::_ops::nll_loss_nd::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss_nd(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + inline at::Tensor nll_loss_nd_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) { + return at::_ops::nll_loss_nd::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + inline at::Tensor nll_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) { + return at::_ops::nll_loss::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + inline at::Tensor nll_loss_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) { + return at::_ops::nll_loss::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple nll_loss_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index) { + return at::_ops::nll_loss_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight); + } + + // aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple nll_loss_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) { + return at::_ops::nll_loss_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight); + } + + // aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple nll_loss_forward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + return at::_ops::nll_loss_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight); + } + + // aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple nll_loss_forward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) { + return at::_ops::nll_loss_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight); + } + + // aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) + inline ::std::tuple nll_loss_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index) { + return at::_ops::nll_loss_forward::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) + inline ::std::tuple nll_loss_forward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + return at::_ops::nll_loss_forward::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & nll_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) { + return at::_ops::nll_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); + } + + // aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & nll_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) { + return at::_ops::nll_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); + } + + // aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & nll_loss_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) { + return at::_ops::nll_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); + } + + // aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & nll_loss_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) { + return at::_ops::nll_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); + } + + // aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor + inline at::Tensor nll_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) { + return at::_ops::nll_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight); + } + + // aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor + inline at::Tensor nll_loss_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) { + return at::_ops::nll_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight); + } + + // aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nll_loss2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) { + return at::_ops::nll_loss2d_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out); + } + + // aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nll_loss2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) { + return at::_ops::nll_loss2d_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out); + } + + // aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nll_loss2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) { + return at::_ops::nll_loss2d_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out); + } + + // aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) 
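// ---------------------------------------------------------------------------
// Illustrative sketch (not one of the generated declarations): nll_loss
// expects log-probabilities, so it is usually paired with log_softmax, which
// together amounts to a cross-entropy loss. Targets equal to ignore_index are
// dropped from the reduction. `logits` and `target` are placeholder names.

#include <ATen/ATen.h>

at::Tensor cross_entropy_sketch(const at::Tensor& logits, const at::Tensor& target) {
  at::Tensor log_probs = at::log_softmax(logits, /*dim=*/1);
  return at::nll_loss(log_probs, target, /*weight=*/{}, at::Reduction::Mean,
                      /*ignore_index=*/-100);
}
// ---------------------------------------------------------------------------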
+ inline at::Tensor & nll_loss2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) { + return at::_ops::nll_loss2d_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out); + } + + // aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + inline at::Tensor nll_loss2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) { + return at::_ops::nll_loss2d::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor + inline at::Tensor nll_loss2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) { + return at::_ops::nll_loss2d::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple nll_loss2d_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index) { + return at::_ops::nll_loss2d_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight); + } + + // aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple nll_loss2d_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) { + return at::_ops::nll_loss2d_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight); + } + + // aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple nll_loss2d_forward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + return at::_ops::nll_loss2d_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight); + } + + // aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple nll_loss2d_forward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) { + return at::_ops::nll_loss2d_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight); + } + + // aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) + inline ::std::tuple nll_loss2d_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index) { + return at::_ops::nll_loss2d_forward::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) + inline ::std::tuple nll_loss2d_forward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + return at::_ops::nll_loss2d_forward::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index); + } + + // aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & nll_loss2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) { + return at::_ops::nll_loss2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); + } + + // aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & nll_loss2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) { + return at::_ops::nll_loss2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); + } + + // aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & nll_loss2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) { + return at::_ops::nll_loss2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); + } + + // aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & nll_loss2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) { + return at::_ops::nll_loss2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); + } + + // aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor + inline at::Tensor nll_loss2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) { + return at::_ops::nll_loss2d_backward::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight); + } + + // aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor + inline at::Tensor nll_loss2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) { + return at::_ops::nll_loss2d_backward::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight); + } + + // aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & smooth_l1_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0) { + return at::_ops::smooth_l1_loss_out::redispatch(dispatchKeySet, self, target, reduction, beta, out); + } + + // aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & smooth_l1_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) { + return at::_ops::smooth_l1_loss_out::redispatch(dispatchKeySet, self, target, reduction, beta, out); + } + + // aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor + inline at::Tensor smooth_l1_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0) { + return at::_ops::smooth_l1_loss::redispatch(dispatchKeySet, self, target, reduction, beta); + } + + // aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) 
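+
+ // [Editor's example — sketch, not part of the generated header] nll_loss_forward /
+ // nll_loss2d_forward return both the loss and total_weight (the summed weight of the
+ // non-ignored targets); the matching *_backward overloads take that total_weight back.
+ // Via the public at:: API (which supplies the DispatchKeySet automatically):
+ //
+ //   at::Tensor logp = at::log_softmax(at::randn({4, 10}), /*dim=*/1);
+ //   at::Tensor tgt  = at::randint(0, 10, {4}, at::kLong);
+ //   auto [loss, total_weight] = at::nll_loss_forward(
+ //       logp, tgt, /*weight=*/{}, at::Reduction::Mean, /*ignore_index=*/-100);
+ //   at::Tensor grad_input = at::nll_loss_backward(
+ //       at::ones_like(loss), logp, tgt, /*weight=*/{}, at::Reduction::Mean,
+ //       /*ignore_index=*/-100, total_weight);
+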
+ inline at::Tensor & smooth_l1_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) { + return at::_ops::smooth_l1_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, beta, grad_input); + } + + // aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & smooth_l1_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) { + return at::_ops::smooth_l1_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, beta, grad_input); + } + + // aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor + inline at::Tensor smooth_l1_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) { + return at::_ops::smooth_l1_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction, beta); + } + + // aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & huber_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) { + return at::_ops::huber_loss_out::redispatch(dispatchKeySet, self, target, reduction, delta, out); + } + + // aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & huber_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) { + return at::_ops::huber_loss_out::redispatch(dispatchKeySet, self, target, reduction, delta, out); + } + + // aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor + inline at::Tensor huber_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) { + return at::_ops::huber_loss::redispatch(dispatchKeySet, self, target, reduction, delta); + } + + // aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & huber_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + return at::_ops::huber_loss_backward_out::redispatch(dispatchKeySet, grad_output, self, target, reduction, delta, grad_input); + } + + // aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) 
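+
+ // [Editor's note] smooth_l1_loss(beta) and huber_loss(delta) compute the same
+ // piecewise-quadratic penalty up to scale: for beta == delta,
+ // huber_loss == delta * smooth_l1_loss (see the PyTorch HuberLoss docs).
+ // Quick illustration with the public API:
+ //
+ //   at::Tensor x = at::randn({8});
+ //   at::Tensor y = at::randn({8});
+ //   at::Tensor h = at::huber_loss(x, y, at::Reduction::Mean, /*delta=*/2.0);
+ //   at::Tensor s = at::smooth_l1_loss(x, y, at::Reduction::Mean, /*beta=*/2.0) * 2.0;
+ //   // h and s agree up to floating-point round-off.
+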
+ inline at::Tensor & huber_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) { + return at::_ops::huber_loss_backward_out::redispatch(dispatchKeySet, grad_output, self, target, reduction, delta, grad_input); + } + + // aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor + inline at::Tensor huber_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + return at::_ops::huber_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction, delta); + } + + // aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & soft_margin_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::soft_margin_loss_out::redispatch(dispatchKeySet, self, target, reduction, out); + } + + // aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & soft_margin_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) { + return at::_ops::soft_margin_loss_out::redispatch(dispatchKeySet, self, target, reduction, out); + } + + // aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + inline at::Tensor soft_margin_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::soft_margin_loss::redispatch(dispatchKeySet, self, target, reduction); + } + + // aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & soft_margin_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::soft_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input); + } + + // aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & soft_margin_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) { + return at::_ops::soft_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input); + } + + // aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + inline at::Tensor soft_margin_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::soft_margin_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction); + } + + // aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & elu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) { + return at::_ops::elu_out::redispatch(dispatchKeySet, self, alpha, scale, input_scale, out); + } + + // aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & elu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) { + return at::_ops::elu_out::redispatch(dispatchKeySet, self, alpha, scale, input_scale, out); + } + + // aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor + inline at::Tensor elu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) { + return at::_ops::elu::redispatch(dispatchKeySet, self, alpha, scale, input_scale); + } + + // aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & elu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) { + return at::_ops::elu_backward_grad_input::redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input); + } + + // aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & elu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) { + return at::_ops::elu_backward_grad_input::redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input); + } + + // aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor + inline at::Tensor elu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) { + return at::_ops::elu_backward::redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result); + } + + // aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) + inline at::Tensor & elu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) { + return at::_ops::elu_::redispatch(dispatchKeySet, self, alpha, scale, input_scale); + } + + // aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim=-1) { + return at::_ops::glu_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) 
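+
+ // [Editor's example — public-API sketch] The same operator is reachable without a
+ // DispatchKeySet through the at:: namespace; e.g. for elu the out=, schema-order,
+ // and in-place spellings are:
+ //
+ //   at::Tensor x   = at::randn({16});
+ //   at::Tensor out = at::empty_like(x);
+ //   at::elu_out(out, x, /*alpha=*/1.0, /*scale=*/1.0, /*input_scale=*/1.0);
+ //   at::elu_outf(x, 1.0, 1.0, 1.0, out);  // identical computation, out tensor last
+ //   at::elu_(x);                          // in-place
+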
+ inline at::Tensor & glu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::glu_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::glu(Tensor self, int dim=-1) -> Tensor + inline at::Tensor glu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=-1) { + return at::_ops::glu::redispatch(dispatchKeySet, self, dim); + } + + // aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & glu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) { + return at::_ops::glu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, dim, grad_input); + } + + // aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & glu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) { + return at::_ops::glu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, dim, grad_input); + } + + // aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor + inline at::Tensor glu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) { + return at::_ops::glu_backward::redispatch(dispatchKeySet, grad_output, self, dim); + } + + // aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor + inline at::Tensor glu_jvp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_jvp::redispatch(dispatchKeySet, glu, x, dx, dim); + } + + // aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor + inline at::Tensor glu_backward_jvp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_backward_jvp::redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim); + } + + // aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardsigmoid_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::hardsigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardsigmoid_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardsigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::hardsigmoid(Tensor self) -> Tensor + inline at::Tensor hardsigmoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::hardsigmoid::redispatch(dispatchKeySet, self); + } + + // aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & hardsigmoid_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::hardsigmoid_::redispatch(dispatchKeySet, self); + } + + // aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
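+
+ // [Editor's note] glu splits the input into two halves along `dim` and returns
+ // first_half * sigmoid(second_half), so the size of `dim` must be even:
+ //
+ //   at::Tensor x = at::randn({4, 10});
+ //   at::Tensor y = at::glu(x, /*dim=*/-1);  // shape {4, 5}
+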
+ inline at::Tensor & hardsigmoid_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardsigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input); + } + + // aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & hardsigmoid_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { + return at::_ops::hardsigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input); + } + + // aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor hardsigmoid_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardsigmoid_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardtanh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_out::redispatch(dispatchKeySet, self, min_val, max_val, out); + } + + // aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardtanh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { + return at::_ops::hardtanh_out::redispatch(dispatchKeySet, self, min_val, max_val, out); + } + + // aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor + inline at::Tensor hardtanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh::redispatch(dispatchKeySet, self, min_val, max_val); + } + + // aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & hardtanh_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + return at::_ops::hardtanh_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, min_val, max_val, grad_input); + } + + // aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & hardtanh_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) { + return at::_ops::hardtanh_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, min_val, max_val, grad_input); + } + + // aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor + inline at::Tensor hardtanh_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + return at::_ops::hardtanh_backward::redispatch(dispatchKeySet, grad_output, self, min_val, max_val); + } + + // aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) 
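+
+ // [Editor's example — public-API sketch] hardsigmoid/hardtanh/hardswish follow the
+ // usual functional / out= / in-place (trailing underscore) trio:
+ //
+ //   at::Tensor x = at::randn({8});
+ //   at::Tensor y = at::hardtanh(x, /*min_val=*/-1, /*max_val=*/1);  // new tensor
+ //   at::hardtanh_(x);                                               // clamps x in place
+ //   at::Tensor z = at::empty_like(x);
+ //   at::hardsigmoid_out(z, x);                                      // writes into z
+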
+ inline at::Tensor & hardtanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_::redispatch(dispatchKeySet, self, min_val, max_val); + } + + // aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardswish_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::hardswish_out::redispatch(dispatchKeySet, self, out); + } + + // aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardswish_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_out::redispatch(dispatchKeySet, self, out); + } + + // aten::hardswish(Tensor self) -> Tensor + inline at::Tensor hardswish(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::hardswish::redispatch(dispatchKeySet, self); + } + + // aten::hardswish_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & hardswish_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::hardswish_::redispatch(dispatchKeySet, self); + } + + // aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor hardswish_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & leaky_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_out::redispatch(dispatchKeySet, self, negative_slope, out); + } + + // aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & leaky_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) { + return at::_ops::leaky_relu_out::redispatch(dispatchKeySet, self, negative_slope, out); + } + + // aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor + inline at::Tensor leaky_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu::redispatch(dispatchKeySet, self, negative_slope); + } + + // aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & leaky_relu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result, grad_input); + } + + // aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & leaky_relu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) { + return at::_ops::leaky_relu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result, grad_input); + } + + // aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor + inline at::Tensor leaky_relu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward::redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result); + } + + // aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) + inline at::Tensor & leaky_relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_::redispatch(dispatchKeySet, self, negative_slope); + } + + // aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log_sigmoid_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::log_sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log_sigmoid_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::log_sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log_sigmoid(Tensor self) -> Tensor + inline at::Tensor log_sigmoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::log_sigmoid::redispatch(dispatchKeySet, self); + } + + // aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) { + return at::_ops::log_sigmoid_forward_output::redispatch(dispatchKeySet, self, output, buffer); + } + + // aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) { + return at::_ops::log_sigmoid_forward_output::redispatch(dispatchKeySet, self, output, buffer); + } + + // aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) + inline ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::log_sigmoid_forward::redispatch(dispatchKeySet, self); + } + + // aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & log_sigmoid_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + return at::_ops::log_sigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, buffer, grad_input); + } + + // aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
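+
+ // [Editor's note] log_sigmoid_forward returns (output, buffer); `buffer` is a saved
+ // intermediate that log_sigmoid_backward consumes (it may be left empty on some
+ // backends). Ordinary callers can use the single-output form:
+ //
+ //   at::Tensor x = at::randn({8});
+ //   at::Tensor y = at::log_sigmoid(x);             // just the output
+ //   auto [out, buf] = at::log_sigmoid_forward(x);  // output plus backward buffer
+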
+ inline at::Tensor & log_sigmoid_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) { + return at::_ops::log_sigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, buffer, grad_input); + } + + // aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor + inline at::Tensor log_sigmoid_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + return at::_ops::log_sigmoid_backward::redispatch(dispatchKeySet, grad_output, self, buffer); + } + + // aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rrelu_with_noise_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::rrelu_with_noise_out::redispatch(dispatchKeySet, self, noise, lower, upper, training, generator, out); + } + + // aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rrelu_with_noise_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::rrelu_with_noise_out::redispatch(dispatchKeySet, self, noise, lower, upper, training, generator, out); + } + + // aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor + inline at::Tensor rrelu_with_noise(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::rrelu_with_noise::redispatch(dispatchKeySet, self, noise, lower, upper, training, generator); + } + + // aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor + inline at::Tensor rrelu_with_noise_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) { + return at::_ops::rrelu_with_noise_backward::redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result); + } + + // aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
+ inline at::Tensor & rrelu_with_noise_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::rrelu_with_noise_::redispatch(dispatchKeySet, self, noise, lower, upper, training, generator); + } + + // aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & softplus_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) { + return at::_ops::softplus_out::redispatch(dispatchKeySet, self, beta, threshold, out); + } + + // aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & softplus_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) { + return at::_ops::softplus_out::redispatch(dispatchKeySet, self, beta, threshold, out); + } + + // aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor + inline at::Tensor softplus(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) { + return at::_ops::softplus::redispatch(dispatchKeySet, self, beta, threshold); + } + + // aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & softplus_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) { + return at::_ops::softplus_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, beta, threshold, grad_input); + } + + // aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & softplus_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) { + return at::_ops::softplus_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, beta, threshold, grad_input); + } + + // aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor + inline at::Tensor softplus_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) { + return at::_ops::softplus_backward::redispatch(dispatchKeySet, grad_output, self, beta, threshold); + } + + // aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & softshrink_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::softshrink_out::redispatch(dispatchKeySet, self, lambd, out); + } + + // aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
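+
+ // [Editor's note — hedged] rrelu_with_noise samples a per-element negative slope from
+ // U(lower, upper) while training (recording it in `noise` for the backward pass) and
+ // uses the fixed mean slope (lower + upper) / 2 in eval mode; the optional Generator
+ // controls the sampling. Public-API sketch with the default generator:
+ //
+ //   at::Tensor x     = at::randn({8});
+ //   at::Tensor noise = at::empty_like(x);
+ //   at::Tensor y = at::rrelu_with_noise(x, noise, /*lower=*/0.125, /*upper=*/1.0 / 3.0,
+ //                                       /*training=*/true);
+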
+ inline at::Tensor & softshrink_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) { + return at::_ops::softshrink_out::redispatch(dispatchKeySet, self, lambd, out); + } + + // aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor + inline at::Tensor softshrink(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::softshrink::redispatch(dispatchKeySet, self, lambd); + } + + // aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & softshrink_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::softshrink_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, lambd, grad_input); + } + + // aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & softshrink_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) { + return at::_ops::softshrink_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, lambd, grad_input); + } + + // aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor + inline at::Tensor softshrink_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::softshrink_backward::redispatch(dispatchKeySet, grad_output, self, lambd); + } + + // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
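+
+ // [Editor's note] Ops whose schema uses SymInt sizes are exposed twice: the
+ // IntArrayRef overload converts through c10::fromIntArrayRefSlow before
+ // redispatching, while the *_symint overload forwards a c10::SymIntArrayRef as-is.
+ // Public-API sketch (assuming the at::adaptive_avg_pool2d_symint spelling from
+ // Functions.h):
+ //
+ //   at::Tensor img = at::randn({1, 3, 64, 64});
+ //   at::Tensor a = at::adaptive_avg_pool2d(img, {7, 7});          // concrete ints
+ //   std::vector<c10::SymInt> size = {c10::SymInt(7), c10::SymInt(7)};
+ //   at::Tensor b = at::adaptive_avg_pool2d_symint(img, size);     // symbolic sizes
+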
+ inline at::Tensor & adaptive_avg_pool2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size)); + } + + // aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + inline at::Tensor mkldnn_adaptive_avg_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::mkldnn_adaptive_avg_pool2d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_adaptive_avg_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::mkldnn_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_adaptive_avg_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::mkldnn_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor mkldnn_adaptive_avg_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + inline at::Tensor _adaptive_avg_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size)); + } + + // aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + inline at::Tensor _adaptive_avg_pool2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool2d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor _adaptive_avg_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::_adaptive_avg_pool2d_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & adaptive_avg_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size)); + } + + // aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::adaptive_avg_pool3d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor + inline at::Tensor _adaptive_avg_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size)); + } + + // aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor + inline at::Tensor _adaptive_avg_pool3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool3d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::adaptive_avg_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input); + } + + // aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & adaptive_avg_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { + return at::_ops::adaptive_avg_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input); + } + + // aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor _adaptive_avg_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::_adaptive_avg_pool3d_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool2d_out::redispatch(dispatchKeySet, self, output_size, out, indices); + } + + // aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { + return at::_ops::adaptive_max_pool2d_out::redispatch(dispatchKeySet, self, output_size, out, indices); + } + + // aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool2d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & adaptive_max_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, indices, grad_input); + } + + // aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & adaptive_max_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::adaptive_max_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, indices, grad_input); + } + + // aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + inline at::Tensor adaptive_max_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool2d_backward::redispatch(dispatchKeySet, grad_output, self, indices); + } + + // aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!)
indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple adaptive_max_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool3d_out::redispatch(dispatchKeySet, self, output_size, out, indices); + } + + // aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple adaptive_max_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_out::redispatch(dispatchKeySet, self, output_size, out, indices); + } + + // aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) + inline ::std::tuple adaptive_max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool3d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & adaptive_max_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, indices, grad_input); + } + + // aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & adaptive_max_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::adaptive_max_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, indices, grad_input); + } + + // aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + inline at::Tensor adaptive_max_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_backward::redispatch(dispatchKeySet, grad_output, self, indices); + } + + // aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & avg_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional divisor_override=c10::nullopt) { + return at::_ops::avg_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); + } + + // aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & avg_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & out) { + return at::_ops::avg_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); + } + + // aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor + inline at::Tensor avg_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional divisor_override=c10::nullopt) { + return at::_ops::avg_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + + // aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & avg_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + return at::_ops::avg_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); + } + + // aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & avg_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & grad_input) { + return at::_ops::avg_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); + } + + // aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor + inline at::Tensor avg_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + return at::_ops::avg_pool2d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + + // aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
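+
+ // [Editor's note] For avg_pool2d/avg_pool3d an empty `stride` (=[] in the schema)
+ // means "stride = kernel_size", and `divisor_override`, when set, replaces the
+ // window-element count used as the averaging divisor:
+ //
+ //   at::Tensor img = at::randn({1, 3, 32, 32});
+ //   at::Tensor p = at::avg_pool2d(img, /*kernel_size=*/{2, 2});    // stride == {2, 2}
+ //   at::Tensor q = at::avg_pool2d(img, {2, 2}, /*stride=*/{1, 1}, /*padding=*/{0, 0},
+ //                                 /*ceil_mode=*/false, /*count_include_pad=*/true,
+ //                                 /*divisor_override=*/2);
+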
+ inline at::Tensor & avg_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional divisor_override=c10::nullopt) { + return at::_ops::avg_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); + } + + // aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & avg_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & out) { + return at::_ops::avg_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); + } + + // aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor + inline at::Tensor avg_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional divisor_override=c10::nullopt) { + return at::_ops::avg_pool3d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + + // aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & avg_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + return at::_ops::avg_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); + } + + // aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & avg_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & grad_input) { + return at::_ops::avg_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); + } + + // aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? 
divisor_override) -> Tensor + inline at::Tensor avg_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + return at::_ops::avg_pool3d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + + // aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple fractional_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool2d_output::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices); + } + + // aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple fractional_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_output::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices); + } + + // aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) + inline ::std::tuple fractional_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool2d::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples); + } + + // aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & fractional_max_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input); + } + + // aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & fractional_max_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::fractional_max_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input); + } + + // aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor + inline at::Tensor fractional_max_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices); + } + + // aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple fractional_max_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool3d_output::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices); + } + + // aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple fractional_max_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_output::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices); + } + + // aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) + inline ::std::tuple fractional_max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool3d::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples); + } + + // aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & fractional_max_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input); + } + + // aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
+    inline at::Tensor & fractional_max_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
+        return at::_ops::fractional_max_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input);
+    }
+
+    // aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
+    inline at::Tensor fractional_max_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
+        return at::_ops::fractional_max_pool3d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices);
+    }
+
+    // aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::max_pool2d_with_indices_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
+    }
+
+    // aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
+        return at::_ops::max_pool2d_with_indices_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
+    }
+
+    // aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::max_pool2d_with_indices::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
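+    // Note (added for readability; not part of the generated header): the
+    // backward overloads expect the `indices` tensor produced by the matching
+    // forward call, and the `.grad_input` variants write into a
+    // caller-provided tensor. Illustrative sketch only, with `ks`, `self` and
+    // `grad_out` standing in for values the caller already has:
+    //
+    //   auto [out, idx] = at::redispatch::max_pool2d_with_indices(ks, self, {2, 2});
+    //   at::Tensor gin = at::empty_like(self);
+    //   at::redispatch::max_pool2d_with_indices_backward_out(
+    //       ks, gin, grad_out, self, {2, 2}, {2, 2}, {0, 0}, {1, 1},
+    //       /*ceil_mode=*/false, idx);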
+ inline at::Tensor & max_pool2d_with_indices_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); + } + + // aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & max_pool2d_with_indices_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); + } + + // aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor + inline at::Tensor max_pool2d_with_indices_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); + } + + // aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple max_pool3d_with_indices_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool3d_with_indices_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); + } + + // aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple max_pool3d_with_indices_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); + } + + // aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + inline ::std::tuple max_pool3d_with_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool3d_with_indices::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & max_pool3d_with_indices_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); + } + + // aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & max_pool3d_with_indices_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool3d_with_indices_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); + } + + // aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor + inline at::Tensor max_pool3d_with_indices_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); + } + + // aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & max_unpool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d_out::redispatch(dispatchKeySet, self, indices, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::redispatch(dispatchKeySet, self, indices, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d_out::redispatch(dispatchKeySet, self, indices, output_size, out); + } + + // aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::redispatch(dispatchKeySet, self, indices, output_size, out); + } + + // aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor + inline at::Tensor max_unpool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d::redispatch(dispatchKeySet, self, indices, c10::fromIntArrayRefSlow(output_size)); + } + + // aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor + inline at::Tensor max_unpool2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d::redispatch(dispatchKeySet, self, indices, output_size); + } + + // aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::redispatch(dispatchKeySet, self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); + } + + // aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::redispatch(dispatchKeySet, self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); + } + + // aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
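+    // Note (added for readability; not part of the generated header): ops
+    // declared with SymInt arguments get two overloads here: a plain one
+    // taking at::IntArrayRef, which is widened via c10::fromIntArrayRefSlow,
+    // and a `_symint` one that forwards c10::SymIntArrayRef unchanged.
+    // Illustrative sketch only (assumes the at::redispatch namespace and a
+    // DispatchKeySet `ks`):
+    //
+    //   at::redispatch::max_unpool2d(ks, self, indices, {4, 4});        // IntArrayRef path
+    //   std::vector<c10::SymInt> sz = {c10::SymInt(4), c10::SymInt(4)};
+    //   at::redispatch::max_unpool2d_symint(ks, self, indices, sz);     // SymInt path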
+ inline at::Tensor & max_unpool3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::redispatch(dispatchKeySet, self, indices, output_size, stride, padding, out); + } + + // aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::redispatch(dispatchKeySet, self, indices, output_size, stride, padding, out); + } + + // aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor + inline at::Tensor max_unpool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::redispatch(dispatchKeySet, self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding); + } + + // aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor + inline at::Tensor max_unpool3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::redispatch(dispatchKeySet, self, indices, output_size, stride, padding); + } + + // aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad1d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & reflection_pad1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad1d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor + inline at::Tensor reflection_pad1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad1d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor + inline at::Tensor reflection_pad1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad1d::redispatch(dispatchKeySet, self, padding); + } + + // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & reflection_pad1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor + inline at::Tensor reflection_pad1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad1d_backward::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor + inline at::Tensor reflection_pad1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad1d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad2d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad2d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor + inline at::Tensor reflection_pad2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor + inline at::Tensor reflection_pad2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad2d::redispatch(dispatchKeySet, self, padding); + } + + // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
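+    // Note (added for readability; not part of the generated header): the
+    // padding argument carries two entries per padded dimension, ordered
+    // (left, right) for 1d, (left, right, top, bottom) for 2d and
+    // (left, right, top, bottom, front, back) for 3d, matching
+    // torch.nn.functional.pad. Illustrative sketch only:
+    //
+    //   // reflect-pad the last two dims of `self` by one element on each side
+    //   at::redispatch::reflection_pad2d(ks, self, {1, 1, 1, 1});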
+ inline at::Tensor & reflection_pad2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor + inline at::Tensor reflection_pad2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_backward::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor + inline at::Tensor reflection_pad2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad2d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & reflection_pad3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad3d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad3d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor + inline at::Tensor reflection_pad3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor + inline at::Tensor reflection_pad3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad3d::redispatch(dispatchKeySet, self, padding); + } + + // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & reflection_pad3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor + inline at::Tensor reflection_pad3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad3d_backward::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor + inline at::Tensor reflection_pad3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad3d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad1d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad1d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor + inline at::Tensor replication_pad1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor + inline at::Tensor replication_pad1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad1d::redispatch(dispatchKeySet, self, padding); + } + + // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & replication_pad1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::replication_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::replication_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor + inline at::Tensor replication_pad1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d_backward::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor + inline at::Tensor replication_pad1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad1d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & replication_pad2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad2d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad2d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor + inline at::Tensor replication_pad2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor + inline at::Tensor replication_pad2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad2d::redispatch(dispatchKeySet, self, padding); + } + + // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::replication_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & replication_pad2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::replication_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor + inline at::Tensor replication_pad2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad2d_backward::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor + inline at::Tensor replication_pad2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad2d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), out); + } + + // aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad3d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad3d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor + inline at::Tensor replication_pad3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor + inline at::Tensor replication_pad3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad3d::redispatch(dispatchKeySet, self, padding); + } + + // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
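+    // Note (added for readability; not part of the generated header): these
+    // wrappers are intentionally thin; they only pass the caller-supplied
+    // DispatchKeySet through to at::_ops::<op>::redispatch so dispatch
+    // resumes below the key currently being handled. A hedged sketch of the
+    // usual masking pattern, assuming the stock c10 helpers:
+    //
+    //   c10::DispatchKeySet after_autograd =
+    //       ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER,
+    //                                c10::DispatchKey::AutogradCPU);
+    //   at::redispatch::replication_pad3d(after_autograd, self, {1, 1, 1, 1, 1, 1});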
+ inline at::Tensor & replication_pad3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::replication_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } + + // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & replication_pad3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::replication_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor + inline at::Tensor replication_pad3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad3d_backward::redispatch(dispatchKeySet, grad_output, self, c10::fromIntArrayRefSlow(padding)); + } + + // aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor + inline at::Tensor replication_pad3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad3d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor + inline at::Tensor _pad_circular(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad) { + return at::_ops::_pad_circular::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(pad)); + } + + // aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor + inline at::Tensor _pad_circular_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad) { + return at::_ops::_pad_circular::redispatch(dispatchKeySet, self, pad); + } + + // aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? 
value=None) -> Tensor
+    inline at::Tensor _pad_enum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, int64_t mode, c10::optional<double> value=c10::nullopt) {
+        return at::_ops::_pad_enum::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(pad), mode, value);
+    }
+
+    // aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
+    inline at::Tensor _pad_enum_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value=c10::nullopt) {
+        return at::_ops::_pad_enum::redispatch(dispatchKeySet, self, pad, mode, value);
+    }
+
+    // aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
+    inline at::Tensor pad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, c10::string_view mode="constant", c10::optional<double> value=c10::nullopt) {
+        return at::_ops::pad::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(pad), mode, value);
+    }
+
+    // aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
+    inline at::Tensor pad_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode="constant", c10::optional<double> value=c10::nullopt) {
+        return at::_ops::pad::redispatch(dispatchKeySet, self, pad, mode, value);
+    }
+
+    // aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_linear1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_linear1d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
+    }
+
+    // aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_linear1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_linear1d_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
+    }
+
+    // aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bilinear2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bilinear2d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
+    }
+
+    // aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bilinear2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bilinear2d_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]?
scale_factors) -> Tensor + inline at::Tensor _upsample_bilinear2d_aa(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::_upsample_bilinear2d_aa_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors); + } + + // aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + inline at::Tensor _upsample_bilinear2d_aa_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::_upsample_bilinear2d_aa_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors); + } + + // aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_trilinear3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_trilinear3d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors); + } + + // aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_trilinear3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_trilinear3d_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors); + } + + // aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_bicubic2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_bicubic2d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors); + } + + // aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_bicubic2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_bicubic2d_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors); + } + + // aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor + inline at::Tensor _upsample_bicubic2d_aa(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::_upsample_bicubic2d_aa_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors); + } + + // aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + inline at::Tensor _upsample_bicubic2d_aa_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::_upsample_bicubic2d_aa_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors); + } + + // aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_nearest1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::upsample_nearest1d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors); + } + + // aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_nearest1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::upsample_nearest1d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors); + } + + // aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor _upsample_nearest_exact1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::_upsample_nearest_exact1d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors); + } + + // aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor _upsample_nearest_exact1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::_upsample_nearest_exact1d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors); + } + + // aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_nearest2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::upsample_nearest2d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors); + } + + // aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_nearest2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::upsample_nearest2d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors); + } + + // aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor _upsample_nearest_exact2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::_upsample_nearest_exact2d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? 
c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors); + } + + // aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor _upsample_nearest_exact2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::_upsample_nearest_exact2d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors); + } + + // aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_nearest3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::upsample_nearest3d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors); + } + + // aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor upsample_nearest3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::upsample_nearest3d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors); + } + + // aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor _upsample_nearest_exact3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::_upsample_nearest_exact3d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors); + } + + // aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor + inline at::Tensor _upsample_nearest_exact3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + return at::_ops::_upsample_nearest_exact3d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors); + } + + // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out); + } + + // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales, at::Tensor & out) { + return at::_ops::upsample_linear1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out); + } + + // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & upsample_linear1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales, out); + } + + // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales, at::Tensor & out) { + return at::_ops::upsample_linear1d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales, out); + } + + // aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor + inline at::Tensor upsample_linear1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales); + } + + // aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor + inline at::Tensor upsample_linear1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d::redispatch(dispatchKeySet, self, output_size, align_corners, scales); + } + + // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input); + } + + // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::upsample_linear1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input); + } + + // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & upsample_linear1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales, grad_input); + } + + // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::upsample_linear1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales, grad_input); + } + + // aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor + inline at::Tensor upsample_linear1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales); + } + + // aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor + inline at::Tensor upsample_linear1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales); + } + + // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bilinear2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_bilinear2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & upsample_bilinear2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bilinear2d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_bilinear2d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_bilinear2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bilinear2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w); + } + + // aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_bilinear2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bilinear2d::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w); + } + + // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bilinear2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & upsample_bilinear2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_bilinear2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bilinear2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_bilinear2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_bilinear2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bilinear2d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w); + } + + // aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_bilinear2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bilinear2d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + + // aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _upsample_bilinear2d_aa_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bilinear2d_aa_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_bilinear2d_aa_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bilinear2d_aa_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_bilinear2d_aa_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_bilinear2d_aa(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bilinear2d_aa::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w); + } + + // aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_bilinear2d_aa_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bilinear2d_aa::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w); + } + + // aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & _upsample_bilinear2d_aa_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_bilinear2d_aa_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bilinear2d_aa_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w); + } + + // aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor _upsample_bilinear2d_aa_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bilinear2d_aa_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + + // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_bicubic2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_bicubic2d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_bicubic2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w); + } + + // aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor upsample_bicubic2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w); + } + + // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_bicubic2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_bicubic2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor upsample_bicubic2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w); + } + + // aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_bicubic2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + + // aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_bicubic2d_aa_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_bicubic2d_aa_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor _upsample_bicubic2d_aa(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w); + } + + // aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_bicubic2d_aa_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w); + } + + // aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_bicubic2d_aa_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w); + } + + // aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_bicubic2d_aa_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + + // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out); + } + + // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_trilinear3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out); + } + + // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & upsample_trilinear3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w, out); + } + + // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_trilinear3d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w, out); + } + + // aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_trilinear3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w); + } + + // aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_trilinear3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d::redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w); + } + + // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input); + } + + // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & upsample_trilinear3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_trilinear3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input); + } + + // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input); + } + + // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_trilinear3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input); + } + + // aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_trilinear3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w); + } + + // aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor upsample_trilinear3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w); + } + + // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_nearest1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales, out); + } + + // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales, at::Tensor & out) { + return at::_ops::upsample_nearest1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales, out); + } + + // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_nearest1d_out::redispatch(dispatchKeySet, self, output_size, scales, out); + } + + // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, at::Tensor & out) { + return at::_ops::upsample_nearest1d_out::redispatch(dispatchKeySet, self, output_size, scales, out); + } + + // aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales, out); + } + + // aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales, at::Tensor & out) { + return at::_ops::_upsample_nearest_exact1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales, out); + } + + // aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _upsample_nearest_exact1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_out::redispatch(dispatchKeySet, self, output_size, scales, out); + } + + // aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, at::Tensor & out) { + return at::_ops::_upsample_nearest_exact1d_out::redispatch(dispatchKeySet, self, output_size, scales, out); + } + + // aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor + inline at::Tensor upsample_nearest1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_nearest1d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales); + } + + // aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor + inline at::Tensor upsample_nearest1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_nearest1d::redispatch(dispatchKeySet, self, output_size, scales); + } + + // aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor + inline at::Tensor _upsample_nearest_exact1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales); + } + + // aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor + inline at::Tensor _upsample_nearest_exact1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d::redispatch(dispatchKeySet, self, output_size, scales); + } + + // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_nearest1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); + } + + // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & upsample_nearest1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::upsample_nearest1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); + } + + // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_nearest1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input); + } + + // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::upsample_nearest1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input); + } + + // aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); + } + + // aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); + } + + // aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & _upsample_nearest_exact1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input); + } + + // aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input); + } + + // aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor + inline at::Tensor upsample_nearest1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_nearest1d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales); + } + + // aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor + inline at::Tensor upsample_nearest1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_nearest1d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales); + } + + // aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor + inline at::Tensor _upsample_nearest_exact1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales); + } + + // aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor + inline at::Tensor _upsample_nearest_exact1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales); + } + + // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
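+
+ // [Editorial note, not generated code] Each schema in this file expands into four C++
+ // wrappers: `*_out` takes `out` first and keeps the schema defaults, `*_outf` takes `out`
+ // last with no defaults, and the `*_symint_*` forms accept c10::SymIntArrayRef directly
+ // while the plain forms convert at::IntArrayRef via c10::fromIntArrayRefSlow before
+ // redispatching. A minimal, hypothetical sketch of calling the functional 1d wrapper
+ // declared above (the helper name and the 2x output size are assumptions for illustration):
+ //
+ //   inline at::Tensor example_nearest1d_double(c10::DispatchKeySet ks, const at::Tensor & self) {
+ //     return upsample_nearest1d(ks, self, {self.size(-1) * 2});  // scales stays c10::nullopt
+ //   }
+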
+ inline at::Tensor & upsample_nearest2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out); + } + + // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_nearest2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out); + } + + // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_out::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out); + } + + // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_nearest2d_out::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out); + } + + // aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out); + } + + // aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_nearest_exact2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out); + } + + // aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact2d_out::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out); + } + + // aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? 
scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_nearest_exact2d_out::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out); + } + + // aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_nearest2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w); + } + + // aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_nearest2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w); + } + + // aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_nearest_exact2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w); + } + + // aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_nearest_exact2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact2d::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w); + } + + // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input); + } + + // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
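+
+ // [Editorial note, not generated code] The `.grad_input` out-variants in this group write
+ // the gradient w.r.t. the input into a caller-provided buffer: `output_size` is the
+ // upsampled spatial size seen by grad_output, and `input_size` is the full size of the
+ // original input, which must match the preallocated grad_input. A hedged sketch using the
+ // 2d wrapper declared above (the helper name and shape bookkeeping are assumptions):
+ //
+ //   inline at::Tensor & example_nearest2d_backward_into(c10::DispatchKeySet ks, at::Tensor & grad_input, const at::Tensor & grad_output) {
+ //     return upsample_nearest2d_backward_out(ks, grad_input, grad_output,
+ //                                            {grad_output.size(2), grad_output.size(3)},
+ //                                            grad_input.sizes());
+ //   }
+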
+ inline at::Tensor & upsample_nearest2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input); + } + + // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input); + } + + // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input); + } + + // aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input); + } + + // aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input); + } + + // aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & _upsample_nearest_exact2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input); + } + + // aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input); + } + + // aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_nearest2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w); + } + + // aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_nearest2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w); + } + + // aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_nearest_exact2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact2d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w); + } + + // aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor _upsample_nearest_exact2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact2d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w); + } + + // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out); + } + + // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_nearest3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out); + } + + // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d_out::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out); + } + + // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_nearest3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_nearest3d_out::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out); + } + + // aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out); + } + + // aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _upsample_nearest_exact3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_nearest_exact3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out); + } + + // aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact3d_out::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out); + } + + // aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_nearest_exact3d_out::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out); + } + + // aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_nearest3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w); + } + + // aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_nearest3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w); + } + + // aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_nearest_exact3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w); + } + + // aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor _upsample_nearest_exact3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact3d::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w); + } + + // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input); + } + + // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input); + } + + // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input); + } + + // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_nearest3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input); + } + + // aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
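+
+ // [Editorial note, not generated code] The `_upsample_nearest_exact*` ops are the
+ // pixel-center ("nearest-exact") flavor of nearest-neighbor resampling exposed alongside
+ // the legacy `upsample_nearest*` kernels; the 3d variants simply add a depth scale
+ // (`scales_d`) next to `scales_h`/`scales_w`. A hypothetical call to the functional 3d
+ // wrapper declared above (tensor name and target size are assumptions):
+ //
+ //   // Upsample an NCDHW volume to a fixed 16x32x32 grid, letting the scales default:
+ //   // at::Tensor up = _upsample_nearest_exact3d(ks, volume, {16, 32, 32});
+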
+ inline at::Tensor & _upsample_nearest_exact3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input); + } + + // aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input); + } + + // aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_nearest_exact3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input); + } + + // aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input); + } + + // aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_nearest3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w); + } + + // aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? 
scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+ inline at::Tensor upsample_nearest3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+     return at::_ops::upsample_nearest3d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+ }
+
+ // aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+ inline at::Tensor _upsample_nearest_exact3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+     return at::_ops::_upsample_nearest_exact3d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
+ }
+
+ // aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+ inline at::Tensor _upsample_nearest_exact3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+     return at::_ops::_upsample_nearest_exact3d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+ }
+
+ // aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & sigmoid_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) {
+     return at::_ops::sigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, output, grad_input);
+ }
+
+ // aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & sigmoid_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
+     return at::_ops::sigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, output, grad_input);
+ }
+
+ // aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
+ inline at::Tensor sigmoid_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output) {
+     return at::_ops::sigmoid_backward::redispatch(dispatchKeySet, grad_output, output);
+ }
+
+ // aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & logit_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
+     return at::_ops::logit_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, eps, grad_input);
+ }
+
+ // aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
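+
+ // [Editorial note, not generated code] sigmoid_backward, tanh_backward and logit_backward
+ // compute the input gradient of the corresponding elementwise op from grad_output plus the
+ // saved forward value: for sigmoid, grad_input = grad_output * output * (1 - output); for
+ // tanh, grad_input = grad_output * (1 - output^2); logit_backward instead takes the forward
+ // input `self` and the optional clamping `eps`. A hypothetical call to the functional
+ // wrapper declared above (variable names are assumptions):
+ //
+ //   // at::Tensor grad_in = sigmoid_backward(ks, grad_out, saved_sigmoid_output);
+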
+ inline at::Tensor & logit_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::optional eps, at::Tensor & grad_input) { + return at::_ops::logit_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, eps, grad_input); + } + + // aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor + inline at::Tensor logit_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::optional eps=c10::nullopt) { + return at::_ops::logit_backward::redispatch(dispatchKeySet, grad_output, self, eps); + } + + // aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & tanh_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) { + return at::_ops::tanh_backward_grad_input::redispatch(dispatchKeySet, grad_output, output, grad_input); + } + + // aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & tanh_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) { + return at::_ops::tanh_backward_grad_input::redispatch(dispatchKeySet, grad_output, output, grad_input); + } + + // aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor + inline at::Tensor tanh_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output) { + return at::_ops::tanh_backward::redispatch(dispatchKeySet, grad_output, output); + } + + // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); + } + + // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); + } + + // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } + + // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } + + // aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor + inline at::Tensor slow_conv_transpose2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation)); + } + + // aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor + inline at::Tensor slow_conv_transpose2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); + } + + // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
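+
+ // [Editorial note, not generated code] In the `_symint` conv wrappers here, the scalar
+ // schema defaults (stride=1, padding=0, output_padding=0, dilation=1) are spelled as a
+ // single c10::SymInt, which ArrayRef's implicit one-element constructor turns into a
+ // length-1 list standing in for the scalar default in the schema. A hedged usage sketch of
+ // the non-symint wrapper declared above (tensor names and the stride-2 choice are
+ // assumptions):
+ //
+ //   // 2x upsampling transposed convolution with a 2x2 kernel and stride 2:
+ //   // at::Tensor y = slow_conv_transpose2d(ks, x, weight, {2, 2}, bias, {2, 2});
+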
+ inline at::Tensor & slow_conv_transpose3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose3d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); + } + + // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose3d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); + } + + // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } + + // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } + + // aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor + inline at::Tensor slow_conv_transpose3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose3d::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation)); + } + + // aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor + inline at::Tensor slow_conv_transpose3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose3d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); + } + + // aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & thnn_conv2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::thnn_conv2d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); + } + + // aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & thnn_conv2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::thnn_conv2d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); + } + + // aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & thnn_conv2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::thnn_conv2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out); + } + + // aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
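+
+ // [Editorial note, not generated code] thnn_conv2d is the reference ("slow") 2d
+ // convolution path; its `bias` parameter is a c10::optional<at::Tensor> whose `{}` default
+ // means "no bias". A hypothetical call through the functional wrapper defined just below,
+ // with assumed tensor names:
+ //
+ //   // 3x3 convolution, stride 1, padding 1, no bias:
+ //   // at::Tensor y = thnn_conv2d(ks, x, weight, {3, 3}, {}, {1, 1}, {1, 1});
+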
+ inline at::Tensor & thnn_conv2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::thnn_conv2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out); + } + + // aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor + inline at::Tensor thnn_conv2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::thnn_conv2d::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding)); + } + + // aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor + inline at::Tensor thnn_conv2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::thnn_conv2d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding); + } + + // aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!) + inline at::Tensor & _slow_conv2d_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::_slow_conv2d_forward_output::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output); + } + + // aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!) + inline at::Tensor & _slow_conv2d_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) { + return at::_ops::_slow_conv2d_forward_output::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output); + } + + // aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!) + inline at::Tensor & _slow_conv2d_forward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + return at::_ops::_slow_conv2d_forward_output::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output); + } + + // aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!) + inline at::Tensor & _slow_conv2d_forward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) { + return at::_ops::_slow_conv2d_forward_output::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output); + } + + // aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor + inline at::Tensor _slow_conv2d_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::_slow_conv2d_forward::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding)); + } + + // aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor + inline at::Tensor _slow_conv2d_forward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + return at::_ops::_slow_conv2d_forward::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding); + } + + // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _slow_conv2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::_slow_conv2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias); + } + + // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _slow_conv2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) { + return at::_ops::_slow_conv2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias); + } + + // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+     return at::_ops::_slow_conv2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
+ }
+
+ // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
+     return at::_ops::_slow_conv2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
+ }
+
+ // aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
+     return at::_ops::_slow_conv2d_backward_output_mask::redispatch(dispatchKeySet, grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask);
+ }
+
+ // aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
+     return at::_ops::_slow_conv2d_backward_output_mask::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask);
+ }
+
+ // aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
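+
+ // [Editorial note, not generated code] In the `.output_mask` overloads above, the three
+ // booleans select which of grad_input, grad_weight and grad_bias are actually computed;
+ // masked-out entries come back as undefined tensors in the returned tuple. A hedged sketch
+ // against the wrapper declared above (variable names and shapes are assumptions):
+ //
+ //   // Only the input and weight gradients are needed here:
+ //   // auto grads = _slow_conv2d_backward(ks, grad_out, input, weight, {3, 3}, {1, 1}, {0, 0}, {{true, true, false}});
+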
+    // aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & _conv_depthwise2d_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
+        return at::_ops::_conv_depthwise2d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out);
+    }
+
+    // aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & _conv_depthwise2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) {
+        return at::_ops::_conv_depthwise2d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out);
+    }
+
+    // aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & _conv_depthwise2d_symint_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
+        return at::_ops::_conv_depthwise2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+    }
+
+    // aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & _conv_depthwise2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, const at::Tensor & out) {
+        return at::_ops::_conv_depthwise2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+    }
+
+    // aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor
+    inline at::Tensor _conv_depthwise2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
+        return at::_ops::_conv_depthwise2d::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation));
+    }
+
+    // aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor
+    inline at::Tensor _conv_depthwise2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
+        return at::_ops::_conv_depthwise2d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
+    }
+
+    // aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor
+    inline at::Tensor conv_depthwise3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
+        return at::_ops::conv_depthwise3d::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation));
+    }
+
+    // aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor
+    inline at::Tensor conv_depthwise3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
+        return at::_ops::conv_depthwise3d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
+    }
+
+    // aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slow_conv3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
+        return at::_ops::slow_conv3d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out);
+    }
+
+    // aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slow_conv3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
+        return at::_ops::slow_conv3d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out);
+    }
+
+    // aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slow_conv3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) {
+        return at::_ops::slow_conv3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out);
+    }
+
+    // aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slow_conv3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
+        return at::_ops::slow_conv3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out);
+    }
+
+    // aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor
+    inline at::Tensor slow_conv3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
+        return at::_ops::slow_conv3d::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding));
+    }
+
+    // aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor
+    inline at::Tensor slow_conv3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) {
+        return at::_ops::slow_conv3d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
+    }
+
+    // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+    inline at::Tensor & slow_conv3d_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
+        return at::_ops::slow_conv3d_forward_output::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output);
+    }
+
+    // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+    inline at::Tensor & slow_conv3d_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
+        return at::_ops::slow_conv3d_forward_output::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output);
+    }
+
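+    // NOTE (illustrative, not generated): the `_out` spelling takes the destination tensor
+    // first, while `_outf` keeps the schema order and takes it last; both forward to the
+    // same at::_ops::slow_conv3d_forward_output::redispatch call. A rough sketch, assuming
+    // the wrappers are visible as at::redispatch::* and given hypothetical tensors `x`,
+    // `w`, `dst` plus a c10::DispatchKeySet `ks`:
+    //
+    //   at::redispatch::slow_conv3d_forward_out(ks, dst, x, w, {3, 3, 3}, {}, {1, 1, 1}, {0, 0, 0});
+    //   at::redispatch::slow_conv3d_forward_outf(ks, x, w, {3, 3, 3}, {}, {1, 1, 1}, {0, 0, 0}, dst);
+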
+    // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+    inline at::Tensor & slow_conv3d_forward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+        return at::_ops::slow_conv3d_forward_output::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output);
+    }
+
+    // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+    inline at::Tensor & slow_conv3d_forward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
+        return at::_ops::slow_conv3d_forward_output::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output);
+    }
+
+    // aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
+    inline at::Tensor slow_conv3d_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
+        return at::_ops::slow_conv3d_forward::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding));
+    }
+
+    // aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
+    inline at::Tensor slow_conv3d_forward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+        return at::_ops::slow_conv3d_forward::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
+    }
+
+    // aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor
+    inline at::Tensor slow_conv_dilated2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
+        return at::_ops::slow_conv_dilated2d::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation));
+    }
+
+    // aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor
+    inline at::Tensor slow_conv_dilated2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) {
+        return at::_ops::slow_conv_dilated2d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
+    }
+
+    // aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
+    inline at::Tensor slow_conv_dilated3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
+        return at::_ops::slow_conv_dilated3d::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation));
+    }
+
+    // aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
+    inline at::Tensor slow_conv_dilated3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) {
+        return at::_ops::slow_conv_dilated3d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
+    }
+
+    // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & col2im_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+        return at::_ops::col2im_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
+    }
+
+    // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & col2im_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
+        return at::_ops::col2im_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
+    }
+
+    // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & col2im_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im_out::redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride, out); + } + + // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & col2im_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::col2im_out::redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride, out); + } + + // aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + inline at::Tensor col2im(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride); + } + + // aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + inline at::Tensor col2im_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im::redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride); + } + + // aten::column_stack(Tensor[] tensors) -> Tensor + inline at::Tensor column_stack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::column_stack::redispatch(dispatchKeySet, tensors); + } + + // aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & column_stack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::column_stack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & column_stack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::column_stack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & im2col_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col_out::redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride, out); + } + + // aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & im2col_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::im2col_out::redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride, out); + } + + // aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + inline at::Tensor im2col(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col::redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride); + } + + // aten::isfinite(Tensor self) -> Tensor + inline at::Tensor isfinite(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isfinite::redispatch(dispatchKeySet, self); + } + + // aten::isinf(Tensor self) -> Tensor + inline at::Tensor isinf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isinf::redispatch(dispatchKeySet, self); + } + + // aten::record_stream(Tensor(a!) self, Stream s) -> () + inline void record_stream(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Stream s) { + return at::_ops::record_stream::redispatch(dispatchKeySet, self, s); + } + + // aten::isposinf(Tensor self) -> Tensor + inline at::Tensor isposinf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isposinf::redispatch(dispatchKeySet, self); + } + + // aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isposinf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::isposinf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isposinf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::isposinf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::isneginf(Tensor self) -> Tensor + inline at::Tensor isneginf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isneginf::redispatch(dispatchKeySet, self); + } + + // aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isneginf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::isneginf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & isneginf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::isneginf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor + inline at::Tensor _add_batch_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t batch_dim, int64_t level) { + return at::_ops::_add_batch_dim::redispatch(dispatchKeySet, self, batch_dim, level); + } + + // aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor + inline at::Tensor _remove_batch_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) { + return at::_ops::_remove_batch_dim::redispatch(dispatchKeySet, self, level, batch_size, out_dim); + } + + // aten::special_entr(Tensor self) -> Tensor + inline at::Tensor special_entr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_entr::redispatch(dispatchKeySet, self); + } + + // aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_entr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_entr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_entr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_entr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_ndtri(Tensor self) -> Tensor + inline at::Tensor special_ndtri(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_ndtri::redispatch(dispatchKeySet, self); + } + + // aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_ndtri_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_ndtri_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_ndtri_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_ndtri_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_log_ndtr(Tensor self) -> Tensor + inline at::Tensor special_log_ndtr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_log_ndtr::redispatch(dispatchKeySet, self); + } + + // aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_log_ndtr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_log_ndtr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_log_ndtr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_log_ndtr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_expm1(Tensor self) -> Tensor + inline at::Tensor special_expm1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_expm1::redispatch(dispatchKeySet, self); + } + + // aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_expm1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_expm1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_exp2(Tensor self) -> Tensor + inline at::Tensor special_exp2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_exp2::redispatch(dispatchKeySet, self); + } + + // aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_exp2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_exp2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_exp2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_exp2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_psi(Tensor self) -> Tensor + inline at::Tensor special_psi(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_psi::redispatch(dispatchKeySet, self); + } + + // aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_psi_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_psi_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_psi_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_psi_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_digamma(Tensor self) -> Tensor + inline at::Tensor special_digamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_digamma::redispatch(dispatchKeySet, self); + } + + // aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_digamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_digamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_digamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_digamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_gammaln(Tensor self) -> Tensor + inline at::Tensor special_gammaln(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_gammaln::redispatch(dispatchKeySet, self); + } + + // aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_gammaln_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_gammaln_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_gammaln_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_gammaln_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erf(Tensor self) -> Tensor + inline at::Tensor special_erf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_erf::redispatch(dispatchKeySet, self); + } + + // aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfc(Tensor self) -> Tensor + inline at::Tensor special_erfc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_erfc::redispatch(dispatchKeySet, self); + } + + // aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfcx(Tensor self) -> Tensor + inline at::Tensor special_erfcx(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_erfcx::redispatch(dispatchKeySet, self); + } + + // aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfcx_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_erfcx_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfcx_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_erfcx_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfinv(Tensor self) -> Tensor + inline at::Tensor special_erfinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_erfinv::redispatch(dispatchKeySet, self); + } + + // aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_erfinv_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_erfinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_erfinv_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_ndtr(Tensor self) -> Tensor + inline at::Tensor special_ndtr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_ndtr::redispatch(dispatchKeySet, self); + } + + // aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_ndtr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_ndtr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_ndtr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_ndtr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_xlog1py(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_xlog1py(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_xlog1py::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor + inline at::Tensor special_xlog1py(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_xlog1py_self_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor special_xlog1py(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_xlog1py_other_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_xlog1py_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_xlog1py_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_xlog1py_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_xlog1py_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_xlog1py_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_xlog1py_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::special_xlog1py_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_xlogy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_xlogy::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor + inline at::Tensor special_xlogy(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_xlogy_self_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor special_xlogy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_xlogy_other_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_xlogy_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_xlogy_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_xlogy_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_xlogy_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_xlogy_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::special_xlogy_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_zeta(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_zeta::redispatch(dispatchKeySet, self, other); + } + + // aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor + inline at::Tensor special_zeta(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_zeta_self_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor special_zeta(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_zeta_other_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_zeta_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_zeta_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_zeta_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_zeta_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_zeta_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::special_zeta_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_i0(Tensor self) -> Tensor + inline at::Tensor special_i0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_i0::redispatch(dispatchKeySet, self); + } + + // aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_i0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i0e(Tensor self) -> Tensor + inline at::Tensor special_i0e(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_i0e::redispatch(dispatchKeySet, self); + } + + // aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i0e_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i0e_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i0e_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i0e_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i1(Tensor self) -> Tensor + inline at::Tensor special_i1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_i1::redispatch(dispatchKeySet, self); + } + + // aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i1e(Tensor self) -> Tensor + inline at::Tensor special_i1e(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_i1e::redispatch(dispatchKeySet, self); + } + + // aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i1e_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i1e_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i1e_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i1e_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_logit(Tensor self, float? eps=None) -> Tensor + inline at::Tensor special_logit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional eps=c10::nullopt) { + return at::_ops::special_logit::redispatch(dispatchKeySet, self, eps); + } + + // aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_logit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional eps=c10::nullopt) { + return at::_ops::special_logit_out::redispatch(dispatchKeySet, self, eps, out); + } + + // aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_logit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional eps, at::Tensor & out) { + return at::_ops::special_logit_out::redispatch(dispatchKeySet, self, eps, out); + } + + // aten::special_polygamma(int n, Tensor self) -> Tensor + inline at::Tensor special_polygamma(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self) { + return at::_ops::special_polygamma::redispatch(dispatchKeySet, n, self); + } + + // aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_polygamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, const at::Tensor & self) { + return at::_ops::special_polygamma_out::redispatch(dispatchKeySet, n, self, out); + } + + // aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_polygamma_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_polygamma_out::redispatch(dispatchKeySet, n, self, out); + } + + // aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + inline at::Tensor special_logsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::special_logsumexp::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_logsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::special_logsumexp_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_logsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::special_logsumexp_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::special_expit(Tensor self) -> Tensor + inline at::Tensor special_expit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_expit::redispatch(dispatchKeySet, self); + } + + // aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_expit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_expit_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_expit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_expit_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_sinc(Tensor self) -> Tensor + inline at::Tensor special_sinc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_sinc::redispatch(dispatchKeySet, self); + } + + // aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_sinc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_sinc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_sinc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_sinc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_round(Tensor self, *, int decimals=0) -> Tensor + inline at::Tensor special_round(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals=0) { + return at::_ops::special_round::redispatch(dispatchKeySet, self, decimals); + } + + // aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_round_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t decimals=0) { + return at::_ops::special_round_out::redispatch(dispatchKeySet, self, decimals, out); + } + + // aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_round_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) { + return at::_ops::special_round_out::redispatch(dispatchKeySet, self, decimals, out); + } + + // aten::special_log1p(Tensor self) -> Tensor + inline at::Tensor special_log1p(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_log1p::redispatch(dispatchKeySet, self); + } + + // aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_log1p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_log1p_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor special_log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt) { + return at::_ops::special_log_softmax::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_gammainc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammainc_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_gammainc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_gammainc_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_gammainc(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_gammainc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammainc::redispatch(dispatchKeySet, self, other); + } + + // aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+    inline at::Tensor & special_gammaincc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::special_gammaincc_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_gammaincc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::special_gammaincc_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::special_gammaincc(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor special_gammaincc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::special_gammaincc::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::special_multigammaln(Tensor self, int p) -> Tensor
+    inline at::Tensor special_multigammaln(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p) {
+        return at::_ops::special_multigammaln::redispatch(dispatchKeySet, self, p);
+    }
+
+    // aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_multigammaln_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t p) {
+        return at::_ops::special_multigammaln_out::redispatch(dispatchKeySet, self, p, out);
+    }
+
+    // aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_multigammaln_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) {
+        return at::_ops::special_multigammaln_out::redispatch(dispatchKeySet, self, p, out);
+    }
+
+    // aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor special_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::special_softmax::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+    inline at::Tensor fft_fft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+        return at::_ops::fft_fft::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm);
+    }
+
+    // aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+    inline at::Tensor fft_fft_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+        return at::_ops::fft_fft::redispatch(dispatchKeySet, self, n, dim, norm);
+    }
+
+    // aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fft_fft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+        return at::_ops::fft_fft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+    }
+
+    // aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fft_fft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+        return at::_ops::fft_fft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+    }
+
+    // aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fft_fft_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+        return at::_ops::fft_fft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+    }
+
+    // aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fft_fft_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+        return at::_ops::fft_fft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+    }
+
+    // aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+    inline at::Tensor fft_ifft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+        return at::_ops::fft_ifft::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm);
+    }
+
+    // aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+    inline at::Tensor fft_ifft_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+        return at::_ops::fft_ifft::redispatch(dispatchKeySet, self, n, dim, norm);
+    }
+
+    // aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fft_ifft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+        return at::_ops::fft_ifft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+    }
+
+    // aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fft_ifft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+        return at::_ops::fft_ifft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+    }
+
+    // aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fft_ifft_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+        return at::_ops::fft_ifft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+    }
+
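+    // NOTE (illustrative, not generated): these wrappers are intended for code that already
+    // holds a c10::DispatchKeySet (for example a boxed fallback or an autograd kernel) and
+    // wants to redispatch below the keys it has handled. A rough sketch, assuming the
+    // wrappers are visible as at::redispatch::* and given a hypothetical tensor `x` and
+    // key set `ks`:
+    //
+    //   at::Tensor spec = at::redispatch::fft_fft(ks & c10::after_autograd_keyset, x,
+    //                                             /*n=*/c10::nullopt, /*dim=*/-1, /*norm=*/"ortho");
+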
+ inline at::Tensor & fft_ifft_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_rfft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); + } + + // aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_rfft_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft::redispatch(dispatchKeySet, self, n, dim, norm); + } + + // aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfft_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfft_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_irfft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfft::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); + } + + // aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_irfft_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfft::redispatch(dispatchKeySet, self, n, dim, norm); + } + + // aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? 
norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_irfft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfft_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfft_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_irfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_hfft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); + } + + // aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_hfft_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft::redispatch(dispatchKeySet, self, n, dim, norm); + } + + // aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_hfft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_hfft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & fft_hfft_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_hfft_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_ihfft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); + } + + // aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_ihfft_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft::redispatch(dispatchKeySet, self, n, dim, norm); + } + + // aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ihfft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ihfft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ihfft_out::redispatch(dispatchKeySet, self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ihfft_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ihfft_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ihfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out); + } + + // aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_fft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fft2::redispatch(dispatchKeySet, self, s.has_value() ? 
c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_fft2_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fft2::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fft2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fft2_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fft2_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_ifft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifft2::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_ifft2_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifft2::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
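+
+    // Illustrative sketch (hypothetical helper, not one of the generated wrappers):
+    // the 2-D transforms above take the optional output-size list either as a plain
+    // at::OptionalIntArrayRef, which the wrapper converts to symbolic ints via
+    // c10::fromIntArrayRefSlow before redispatching, or as an
+    // at::OptionalSymIntArrayRef in the *_symint overload, which is forwarded
+    // untouched. Cropping or zero-padding a 2-D FFT to 64x64 over the default dims:
+    inline at::Tensor illustrative_fft2_64x64(c10::DispatchKeySet ks, const at::Tensor & x) {
+        return fft_fft2(ks, x, at::IntArrayRef{64, 64});  // dim and norm keep their defaults
+    }
+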
+ inline at::Tensor & fft_ifft2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ifft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ifft2_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ifft2_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_rfft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_rfft2_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfft2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft2_out::redispatch(dispatchKeySet, self, s.has_value() ? 
c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfft2_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfft2_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_irfft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfft2::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_irfft2_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfft2::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfft2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_irfft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfft2_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
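+
+    // Illustrative sketch (hypothetical helper, not one of the generated wrappers):
+    // every out-variant in this header is emitted twice: the *_out spelling takes
+    // `out` first and keeps the schema defaults, while the *_outf spelling follows
+    // the schema argument order, takes `out` last, and has no defaults. Both forward
+    // to the same at::_ops::..._out::redispatch call:
+    inline at::Tensor & illustrative_irfft2_into(c10::DispatchKeySet ks, const at::Tensor & x, at::Tensor & out) {
+        fft_irfft2_out(ks, out, x);  // out first, s/dim/norm defaulted
+        return fft_irfft2_outf(ks, x, c10::nullopt, {-2, -1}, c10::nullopt, out);  // schema order, out last
+    }
+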
+ inline at::Tensor & fft_irfft2_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_irfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_hfft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft2::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_hfft2_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft2::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_hfft2_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_hfft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_hfft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_hfft2_symint_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_hfft2_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_hfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_ihfft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_ihfft2(Tensor self, SymInt[1]? 
s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + inline at::Tensor fft_ihfft2_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_ihfft2_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_ihfft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_ihfft2_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_ihfft2_symint_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_ihfft2_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_ihfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_fftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_fftn_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
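+
+    // Illustrative sketch (hypothetical helper, not one of the generated wrappers):
+    // for the N-dimensional transforms both the size list `s` and the dim list are
+    // optional; passing c10::nullopt for both transforms every dimension, while an
+    // explicit dim list restricts the transform (here to the last two dims):
+    inline at::Tensor illustrative_fftn_last_two_dims(c10::DispatchKeySet ks, const at::Tensor & x) {
+        return fft_fftn(ks, x, /*s=*/c10::nullopt, /*dim=*/at::IntArrayRef{-2, -1});
+    }
+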
+ inline at::Tensor & fft_fftn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fftn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fftn_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_ifftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifftn::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_ifftn_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifftn::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ifftn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ifftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ifftn_out::redispatch(dispatchKeySet, self, s.has_value() ? 
c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ifftn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_ifftn_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ifftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_rfftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_rfftn_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfftn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfftn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & fft_rfftn_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_irfftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfftn::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_irfftn_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfftn::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfftn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_irfftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfftn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_irfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_irfftn_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_irfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_hfftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfftn::redispatch(dispatchKeySet, self, s.has_value() ? 
c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_hfftn_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfftn::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_hfftn_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_hfftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_hfftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_hfftn_symint_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_hfftn_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_hfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_ihfftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfftn::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } + + // aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + inline at::Tensor fft_ihfftn_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfftn::redispatch(dispatchKeySet, self, s, dim, norm); + } + + // aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
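+
+    // Illustrative sketch (hypothetical helper, not one of the generated wrappers):
+    // the hfft2/ihfft2/hfftn/ihfftn out-variants above take and return
+    // `const at::Tensor &` for `out`. The const qualifier is an artifact of how these
+    // particular signatures are generated; the schema still annotates the argument as
+    // Tensor(a!), so the dispatched kernel writes the result into `out`'s storage and
+    // callers pass a tensor they own as the destination:
+    inline const at::Tensor & illustrative_hfftn_into(c10::DispatchKeySet ks, const at::Tensor & x, const at::Tensor & out) {
+        return fft_hfftn_out(ks, out, x);  // s/dim/norm keep their defaults; result lands in `out`
+    }
+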
+ inline const at::Tensor & fft_ihfftn_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_ihfftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_ihfftn_out::redispatch(dispatchKeySet, self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } + + // aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_ihfftn_symint_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & fft_ihfftn_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_ihfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out); + } + + // aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor fft_fftfreq(c10::DispatchKeySet dispatchKeySet, int64_t n, double d=1.0, at::TensorOptions options={}) { + return at::_ops::fft_fftfreq::redispatch(dispatchKeySet, n, d, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor fft_fftfreq(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::fft_fftfreq::redispatch(dispatchKeySet, n, d, dtype, layout, device, pin_memory); + } + + // aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fftfreq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, double d=1.0) { + return at::_ops::fft_fftfreq_out::redispatch(dispatchKeySet, n, d, out); + } + + // aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_fftfreq_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) { + return at::_ops::fft_fftfreq_out::redispatch(dispatchKeySet, n, d, out); + } + + // aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor fft_rfftfreq(c10::DispatchKeySet dispatchKeySet, int64_t n, double d=1.0, at::TensorOptions options={}) { + return at::_ops::fft_rfftfreq::redispatch(dispatchKeySet, n, d, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor fft_rfftfreq(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::fft_rfftfreq::redispatch(dispatchKeySet, n, d, dtype, layout, device, pin_memory); + } + + // aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfftfreq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, double d=1.0) { + return at::_ops::fft_rfftfreq_out::redispatch(dispatchKeySet, n, d, out); + } + + // aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fft_rfftfreq_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) { + return at::_ops::fft_rfftfreq_out::redispatch(dispatchKeySet, n, d, out); + } + + // aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor + inline at::Tensor fft_fftshift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt) { + return at::_ops::fft_fftshift::redispatch(dispatchKeySet, self, dim); + } + + // aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor + inline at::Tensor fft_ifftshift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt) { + return at::_ops::fft_ifftshift::redispatch(dispatchKeySet, self, dim); + } + + // aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) + inline ::std::tuple linalg_cholesky_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper=false, bool check_errors=false) { + return at::_ops::linalg_cholesky_ex::redispatch(dispatchKeySet, self, upper, check_errors); + } + + // aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) + inline ::std::tuple linalg_cholesky_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false) { + return at::_ops::linalg_cholesky_ex_L::redispatch(dispatchKeySet, self, upper, check_errors, L, info); + } + + // aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) + inline ::std::tuple linalg_cholesky_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) { + return at::_ops::linalg_cholesky_ex_L::redispatch(dispatchKeySet, self, upper, check_errors, L, info); + } + + // aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor + inline at::Tensor linalg_cholesky(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper=false) { + return at::_ops::linalg_cholesky::redispatch(dispatchKeySet, self, upper); + } + + // aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) 
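+
+    // Illustrative sketch (hypothetical helper, not one of the generated wrappers):
+    // linalg_cholesky_ex above returns the pair (L, info) instead of raising by
+    // default; with check_errors=true the redispatched op reports a failed
+    // factorization itself, otherwise a nonzero entry of `info` flags the offending
+    // (batched) matrix. Unpacking the returned tuple:
+    inline at::Tensor illustrative_cholesky_factor(c10::DispatchKeySet ks, const at::Tensor & A) {
+        at::Tensor L, info;
+        std::tie(L, info) = linalg_cholesky_ex(ks, A, /*upper=*/false, /*check_errors=*/true);
+        return L;
+    }
+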
+ inline at::Tensor & linalg_cholesky_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool upper=false) { + return at::_ops::linalg_cholesky_out::redispatch(dispatchKeySet, self, upper, out); + } + + // aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_cholesky_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) { + return at::_ops::linalg_cholesky_out::redispatch(dispatchKeySet, self, upper, out); + } + + // aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor + inline at::Tensor linalg_cross(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) { + return at::_ops::linalg_cross::redispatch(dispatchKeySet, self, other, dim); + } + + // aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_cross_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) { + return at::_ops::linalg_cross_out::redispatch(dispatchKeySet, self, other, dim, out); + } + + // aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_cross_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) { + return at::_ops::linalg_cross_out::redispatch(dispatchKeySet, self, other, dim, out); + } + + // aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots) + inline ::std::tuple linalg_lu_factor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot=true) { + return at::_ops::linalg_lu_factor::redispatch(dispatchKeySet, A, pivot); + } + + // aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) + inline ::std::tuple linalg_lu_factor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true) { + return at::_ops::linalg_lu_factor_out::redispatch(dispatchKeySet, A, pivot, LU, pivots); + } + + // aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) + inline ::std::tuple linalg_lu_factor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) { + return at::_ops::linalg_lu_factor_out::redispatch(dispatchKeySet, A, pivot, LU, pivots); + } + + // aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info) + inline ::std::tuple linalg_lu_factor_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot=true, bool check_errors=false) { + return at::_ops::linalg_lu_factor_ex::redispatch(dispatchKeySet, A, pivot, check_errors); + } + + // aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) 
info) + inline ::std::tuple linalg_lu_factor_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false) { + return at::_ops::linalg_lu_factor_ex_out::redispatch(dispatchKeySet, A, pivot, check_errors, LU, pivots, info); + } + + // aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) + inline ::std::tuple linalg_lu_factor_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) { + return at::_ops::linalg_lu_factor_ex_out::redispatch(dispatchKeySet, A, pivot, check_errors, LU, pivots, info); + } + + // aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U) + inline ::std::tuple linalg_lu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot=true) { + return at::_ops::linalg_lu::redispatch(dispatchKeySet, A, pivot); + } + + // aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) + inline ::std::tuple linalg_lu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true) { + return at::_ops::linalg_lu_out::redispatch(dispatchKeySet, A, pivot, P, L, U); + } + + // aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) + inline ::std::tuple linalg_lu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) { + return at::_ops::linalg_lu_out::redispatch(dispatchKeySet, A, pivot, P, L, U); + } + + // aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor + inline at::Tensor linalg_lu_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) { + return at::_ops::linalg_lu_solve::redispatch(dispatchKeySet, LU, pivots, B, left, adjoint); + } + + // aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_lu_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) { + return at::_ops::linalg_lu_solve_out::redispatch(dispatchKeySet, LU, pivots, B, left, adjoint, out); + } + + // aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_lu_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) { + return at::_ops::linalg_lu_solve_out::redispatch(dispatchKeySet, LU, pivots, B, left, adjoint, out); + } + + // aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots) + inline ::std::tuple _linalg_det(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) { + return at::_ops::_linalg_det::redispatch(dispatchKeySet, A); + } + + // aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) 
LU, Tensor(c!) pivots) + inline ::std::tuple _linalg_det_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) { + return at::_ops::_linalg_det_result::redispatch(dispatchKeySet, A, result, LU, pivots); + } + + // aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) + inline ::std::tuple _linalg_det_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) { + return at::_ops::_linalg_det_result::redispatch(dispatchKeySet, A, result, LU, pivots); + } + + // aten::linalg_det(Tensor A) -> Tensor + inline at::Tensor linalg_det(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) { + return at::_ops::linalg_det::redispatch(dispatchKeySet, A); + } + + // aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_det_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & A) { + return at::_ops::linalg_det_out::redispatch(dispatchKeySet, A, out); + } + + // aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_det_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) { + return at::_ops::linalg_det_out::redispatch(dispatchKeySet, A, out); + } + + // aten::det(Tensor self) -> Tensor + inline at::Tensor det(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::det::redispatch(dispatchKeySet, self); + } + + // aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info) + inline ::std::tuple linalg_ldl_factor_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian=false, bool check_errors=false) { + return at::_ops::linalg_ldl_factor_ex::redispatch(dispatchKeySet, self, hermitian, check_errors); + } + + // aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) + inline ::std::tuple linalg_ldl_factor_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false) { + return at::_ops::linalg_ldl_factor_ex_out::redispatch(dispatchKeySet, self, hermitian, check_errors, LD, pivots, info); + } + + // aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) + inline ::std::tuple linalg_ldl_factor_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) { + return at::_ops::linalg_ldl_factor_ex_out::redispatch(dispatchKeySet, self, hermitian, check_errors, LD, pivots, info); + } + + // aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots) + inline ::std::tuple linalg_ldl_factor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian=false) { + return at::_ops::linalg_ldl_factor::redispatch(dispatchKeySet, self, hermitian); + } + + // aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) 
pivots) + inline ::std::tuple linalg_ldl_factor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian=false) { + return at::_ops::linalg_ldl_factor_out::redispatch(dispatchKeySet, self, hermitian, LD, pivots); + } + + // aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots) + inline ::std::tuple linalg_ldl_factor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) { + return at::_ops::linalg_ldl_factor_out::redispatch(dispatchKeySet, self, hermitian, LD, pivots); + } + + // aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor + inline at::Tensor linalg_ldl_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) { + return at::_ops::linalg_ldl_solve::redispatch(dispatchKeySet, LD, pivots, B, hermitian); + } + + // aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_ldl_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) { + return at::_ops::linalg_ldl_solve_out::redispatch(dispatchKeySet, LD, pivots, B, hermitian, out); + } + + // aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_ldl_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_ldl_solve_out::redispatch(dispatchKeySet, LD, pivots, B, hermitian, out); + } + + // aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values) + inline ::std::tuple linalg_lstsq(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional rcond=c10::nullopt, c10::optional driver=c10::nullopt) { + return at::_ops::linalg_lstsq::redispatch(dispatchKeySet, self, b, rcond, driver); + } + + // aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) + inline ::std::tuple linalg_lstsq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, c10::optional rcond=c10::nullopt, c10::optional driver=c10::nullopt) { + return at::_ops::linalg_lstsq_out::redispatch(dispatchKeySet, self, b, rcond, driver, solution, residuals, rank, singular_values); + } + + // aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) 
singular_values) + inline ::std::tuple linalg_lstsq_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional rcond, c10::optional driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) { + return at::_ops::linalg_lstsq_out::redispatch(dispatchKeySet, self, b, rcond, driver, solution, residuals, rank, singular_values); + } + + // aten::linalg_matmul(Tensor self, Tensor other) -> Tensor + inline at::Tensor linalg_matmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::linalg_matmul::redispatch(dispatchKeySet, self, other); + } + + // aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::linalg_matmul_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matmul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::linalg_matmul_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor + inline at::Tensor linalg_vecdot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) { + return at::_ops::linalg_vecdot::redispatch(dispatchKeySet, x, y, dim); + } + + // aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_vecdot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) { + return at::_ops::linalg_vecdot_out::redispatch(dispatchKeySet, x, y, dim, out); + } + + // aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_vecdot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) { + return at::_ops::linalg_vecdot_out::redispatch(dispatchKeySet, x, y, dim, out); + } + + // aten::linalg_matrix_exp(Tensor self) -> Tensor + inline at::Tensor linalg_matrix_exp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::linalg_matrix_exp::redispatch(dispatchKeySet, self); + } + + // aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots) + inline ::std::tuple _linalg_slogdet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) { + return at::_ops::_linalg_slogdet::redispatch(dispatchKeySet, A); + } + + // aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) + inline ::std::tuple _linalg_slogdet_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) { + return at::_ops::_linalg_slogdet_sign::redispatch(dispatchKeySet, A, sign, logabsdet, LU, pivots); + } + + // aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) 
pivots) + inline ::std::tuple _linalg_slogdet_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) { + return at::_ops::_linalg_slogdet_sign::redispatch(dispatchKeySet, A, sign, logabsdet, LU, pivots); + } + + // aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet) + inline ::std::tuple linalg_slogdet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) { + return at::_ops::linalg_slogdet::redispatch(dispatchKeySet, A); + } + + // aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) + inline ::std::tuple linalg_slogdet_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & A) { + return at::_ops::linalg_slogdet_out::redispatch(dispatchKeySet, A, sign, logabsdet); + } + + // aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) + inline ::std::tuple linalg_slogdet_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) { + return at::_ops::linalg_slogdet_out::redispatch(dispatchKeySet, A, sign, logabsdet); + } + + // aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) + inline ::std::tuple slogdet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::slogdet::redispatch(dispatchKeySet, self); + } + + // aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) + inline ::std::tuple slogdet_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & self) { + return at::_ops::slogdet_out::redispatch(dispatchKeySet, self, sign, logabsdet); + } + + // aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) + inline ::std::tuple slogdet_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) { + return at::_ops::slogdet_out::redispatch(dispatchKeySet, self, sign, logabsdet); + } + + // aten::logdet(Tensor self) -> Tensor + inline at::Tensor logdet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::logdet::redispatch(dispatchKeySet, self); + } + + // aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors) + inline ::std::tuple linalg_eig(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::linalg_eig::redispatch(dispatchKeySet, self); + } + + // aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + inline ::std::tuple linalg_eig_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self) { + return at::_ops::linalg_eig_out::redispatch(dispatchKeySet, self, eigenvalues, eigenvectors); + } + + // aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) 
eigenvectors) + inline ::std::tuple linalg_eig_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) { + return at::_ops::linalg_eig_out::redispatch(dispatchKeySet, self, eigenvalues, eigenvectors); + } + + // aten::_linalg_eigvals(Tensor self) -> Tensor + inline at::Tensor _linalg_eigvals(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_linalg_eigvals::redispatch(dispatchKeySet, self); + } + + // aten::linalg_eigvals(Tensor self) -> Tensor + inline at::Tensor linalg_eigvals(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::linalg_eigvals::redispatch(dispatchKeySet, self); + } + + // aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_eigvals_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::linalg_eigvals_out::redispatch(dispatchKeySet, self, out); + } + + // aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_eigvals_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::linalg_eigvals_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors) + inline ::std::tuple _linalg_eigh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) { + return at::_ops::_linalg_eigh::redispatch(dispatchKeySet, A, UPLO, compute_v); + } + + // aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + inline ::std::tuple _linalg_eigh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) { + return at::_ops::_linalg_eigh_eigenvalues::redispatch(dispatchKeySet, A, UPLO, compute_v, eigenvalues, eigenvectors); + } + + // aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + inline ::std::tuple _linalg_eigh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) { + return at::_ops::_linalg_eigh_eigenvalues::redispatch(dispatchKeySet, A, UPLO, compute_v, eigenvalues, eigenvectors); + } + + // aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors) + inline ::std::tuple linalg_eigh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO="L") { + return at::_ops::linalg_eigh::redispatch(dispatchKeySet, self, UPLO); + } + + // aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + inline ::std::tuple linalg_eigh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO="L") { + return at::_ops::linalg_eigh_eigvals::redispatch(dispatchKeySet, self, UPLO, eigvals, eigvecs); + } + + // aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) 
eigenvectors) + inline ::std::tuple linalg_eigh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) { + return at::_ops::linalg_eigh_eigvals::redispatch(dispatchKeySet, self, UPLO, eigvals, eigvecs); + } + + // aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor + inline at::Tensor linalg_eigvalsh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO="L") { + return at::_ops::linalg_eigvalsh::redispatch(dispatchKeySet, self, UPLO); + } + + // aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_eigvalsh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view UPLO="L") { + return at::_ops::linalg_eigvalsh_out::redispatch(dispatchKeySet, self, UPLO, out); + } + + // aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_eigvalsh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) { + return at::_ops::linalg_eigvalsh_out::redispatch(dispatchKeySet, self, UPLO, out); + } + + // aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor + inline at::Tensor linalg_householder_product(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau) { + return at::_ops::linalg_householder_product::redispatch(dispatchKeySet, input, tau); + } + + // aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_householder_product_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & tau) { + return at::_ops::linalg_householder_product_out::redispatch(dispatchKeySet, input, tau, out); + } + + // aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_householder_product_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) { + return at::_ops::linalg_householder_product_out::redispatch(dispatchKeySet, input, tau, out); + } + + // aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info) + inline ::std::tuple linalg_inv_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors=false) { + return at::_ops::linalg_inv_ex::redispatch(dispatchKeySet, A, check_errors); + } + + // aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info) + inline ::std::tuple linalg_inv_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false) { + return at::_ops::linalg_inv_ex_inverse::redispatch(dispatchKeySet, A, check_errors, inverse, info); + } + + // aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) 
info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
+        return at::_ops::linalg_inv_ex_inverse::redispatch(dispatchKeySet, A, check_errors, inverse, info);
+    }
+
+    // aten::linalg_inv(Tensor A) -> Tensor
+    inline at::Tensor linalg_inv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
+        return at::_ops::linalg_inv::redispatch(dispatchKeySet, A);
+    }
+
+    // aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_inv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & A) {
+        return at::_ops::linalg_inv_out::redispatch(dispatchKeySet, A, out);
+    }
+
+    // aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_inv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
+        return at::_ops::linalg_inv_out::redispatch(dispatchKeySet, A, out);
+    }
+
+    // aten::inverse(Tensor self) -> Tensor
+    inline at::Tensor inverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::inverse::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & inverse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::inverse_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & inverse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::inverse_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::inner(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor inner(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::inner::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & inner_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::inner_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & inner_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::inner_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::outer(Tensor self, Tensor vec2) -> Tensor
+    inline at::Tensor outer(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2) {
+        return at::_ops::outer::redispatch(dispatchKeySet, self, vec2);
+    }
+
+    // aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & outer_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
+        return at::_ops::outer_out::redispatch(dispatchKeySet, self, vec2, out);
+    }
+
+    // aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
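// The wrappers above mirror the public at:: entry points for matrix inverse
// and inner/outer products. A minimal usage sketch follows (an illustrative
// editorial aside, not generated code; it assumes a C++17 program that
// includes ATen/ATen.h and links against libtorch):
//
//   at::Tensor A    = at::randn({3, 3});
//   at::Tensor Ainv = at::linalg_inv(A);   // aten::linalg_inv
//   at::Tensor v    = at::randn({3});
//   at::Tensor w    = at::randn({3});
//   at::Tensor d    = at::inner(v, w);     // aten::inner (dot product for 1-D inputs)
//   at::Tensor o    = at::outer(v, w);     // aten::outer; at::ger is the legacy alias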
+ inline at::Tensor & outer_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) { + return at::_ops::outer_out::redispatch(dispatchKeySet, self, vec2, out); + } + + // aten::ger(Tensor self, Tensor vec2) -> Tensor + inline at::Tensor ger(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2) { + return at::_ops::ger::redispatch(dispatchKeySet, self, vec2); + } + + // aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ger_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) { + return at::_ops::ger_out::redispatch(dispatchKeySet, self, vec2, out); + } + + // aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ger_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) { + return at::_ops::ger_out::redispatch(dispatchKeySet, self, vec2, out); + } + + // aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor linalg_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_norm::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype); + } + + // aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor linalg_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_norm_ord_str::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype); + } + + // aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::linalg_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_norm_ord_str_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::linalg_norm_ord_str_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor linalg_vector_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_vector_norm::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype); + } + + // aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_vector_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_vector_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_vector_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::linalg_vector_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor linalg_matrix_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_matrix_norm::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype); + } + + // aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_matrix_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::linalg_matrix_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + inline at::Tensor linalg_matrix_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_matrix_norm_str_ord::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype); + } + + // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::linalg_matrix_norm_str_ord_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::linalg_matrix_norm_str_ord_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out); + } + + // aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh) + inline ::std::tuple _linalg_svd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional driver=c10::nullopt) { + return at::_ops::_linalg_svd::redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver); + } + + // aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) + inline ::std::tuple _linalg_svd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional driver=c10::nullopt) { + return at::_ops::_linalg_svd_U::redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver, U, S, Vh); + } + + // aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) + inline ::std::tuple _linalg_svd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) { + return at::_ops::_linalg_svd_U::redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver, U, S, Vh); + } + + // aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh) + inline ::std::tuple linalg_svd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices=true, c10::optional driver=c10::nullopt) { + return at::_ops::linalg_svd::redispatch(dispatchKeySet, A, full_matrices, driver); + } + + // aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) 
Vh) + inline ::std::tuple linalg_svd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=true, c10::optional driver=c10::nullopt) { + return at::_ops::linalg_svd_U::redispatch(dispatchKeySet, A, full_matrices, driver, U, S, Vh); + } + + // aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) + inline ::std::tuple linalg_svd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, c10::optional driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) { + return at::_ops::linalg_svd_U::redispatch(dispatchKeySet, A, full_matrices, driver, U, S, Vh); + } + + // aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor + inline at::Tensor linalg_svdvals(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional driver=c10::nullopt) { + return at::_ops::linalg_svdvals::redispatch(dispatchKeySet, A, driver); + } + + // aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_svdvals_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & A, c10::optional driver=c10::nullopt) { + return at::_ops::linalg_svdvals_out::redispatch(dispatchKeySet, A, driver, out); + } + + // aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_svdvals_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional driver, at::Tensor & out) { + return at::_ops::linalg_svdvals_out::redispatch(dispatchKeySet, A, driver, out); + } + + // aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor + inline at::Tensor linalg_cond(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p=c10::nullopt) { + return at::_ops::linalg_cond::redispatch(dispatchKeySet, self, p); + } + + // aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_cond_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & p=c10::nullopt) { + return at::_ops::linalg_cond_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_cond_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::Tensor & out) { + return at::_ops::linalg_cond_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::linalg_cond.p_str(Tensor self, str p) -> Tensor + inline at::Tensor linalg_cond(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p) { + return at::_ops::linalg_cond_p_str::redispatch(dispatchKeySet, self, p); + } + + // aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_cond_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view p) { + return at::_ops::linalg_cond_p_str_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) 
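// The SVD-related wrappers above correspond to the public at::linalg_svd,
// at::linalg_svdvals and at::linalg_cond functions. A hedged usage sketch
// (illustrative only, assuming a C++17 translation unit that includes
// ATen/ATen.h):
//
//   at::Tensor A = at::randn({3, 3});
//   auto [U, S, Vh]  = at::linalg_svd(A, /*full_matrices=*/false);  // reduced SVD
//   at::Tensor sv    = at::linalg_svdvals(A);                       // singular values only
//   at::Tensor kappa = at::linalg_cond(A);                          // 2-norm condition number by default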
+ inline at::Tensor & linalg_cond_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out) { + return at::_ops::linalg_cond_p_str_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor + inline at::Tensor linalg_pinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & atol={}, const c10::optional & rtol={}, bool hermitian=false) { + return at::_ops::linalg_pinv_atol_rtol_tensor::redispatch(dispatchKeySet, self, atol, rtol, hermitian); + } + + // aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_pinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & atol={}, const c10::optional & rtol={}, bool hermitian=false) { + return at::_ops::linalg_pinv_atol_rtol_tensor_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out); + } + + // aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_pinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & atol, const c10::optional & rtol, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_pinv_atol_rtol_tensor_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out); + } + + // aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor + inline at::Tensor linalg_pinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian=false) { + return at::_ops::linalg_pinv_atol_rtol_float::redispatch(dispatchKeySet, self, atol, rtol, hermitian); + } + + // aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_pinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian=false) { + return at::_ops::linalg_pinv_atol_rtol_float_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out); + } + + // aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) 
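// linalg_pinv is exposed through several overloads: the atol/rtol pair
// (Tensor-valued or float-valued) and the older rcond forms. A sketch of how
// the corresponding public at::linalg_pinv overloads are typically called
// (illustrative and unverified against any particular build):
//
//   at::Tensor A  = at::randn({5, 3});
//   at::Tensor P1 = at::linalg_pinv(A);                   // atol/rtol overload, both defaulted
//   at::Tensor P2 = at::linalg_pinv(A, /*rcond=*/1e-15);  // legacy rcond overload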
+ inline at::Tensor & linalg_pinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_pinv_atol_rtol_float_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out); + } + + // aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor + inline at::Tensor linalg_pinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian=false) { + return at::_ops::linalg_pinv::redispatch(dispatchKeySet, self, rcond, hermitian); + } + + // aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor + inline at::Tensor linalg_pinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) { + return at::_ops::linalg_pinv_rcond_tensor::redispatch(dispatchKeySet, self, rcond, hermitian); + } + + // aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_pinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double rcond, bool hermitian=false) { + return at::_ops::linalg_pinv_out::redispatch(dispatchKeySet, self, rcond, hermitian, out); + } + + // aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_pinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_pinv_out::redispatch(dispatchKeySet, self, rcond, hermitian, out); + } + + // aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_pinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) { + return at::_ops::linalg_pinv_out_rcond_tensor::redispatch(dispatchKeySet, self, rcond, hermitian, out); + } + + // aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_pinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_pinv_out_rcond_tensor::redispatch(dispatchKeySet, self, rcond, hermitian, out); + } + + // aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info) + inline ::std::tuple _linalg_solve_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) { + return at::_ops::_linalg_solve_ex::redispatch(dispatchKeySet, A, B, left, check_errors); + } + + // aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) 
info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
+        return at::_ops::_linalg_solve_ex_result::redispatch(dispatchKeySet, A, B, left, check_errors, result, LU, pivots, info);
+    }
+
+    // aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
+        return at::_ops::_linalg_solve_ex_result::redispatch(dispatchKeySet, A, B, left, check_errors, result, LU, pivots, info);
+    }
+
+    // aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
+    inline ::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
+        return at::_ops::linalg_solve_ex::redispatch(dispatchKeySet, A, B, left, check_errors);
+    }
+
+    // aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
+        return at::_ops::linalg_solve_ex_out::redispatch(dispatchKeySet, A, B, left, check_errors, result, info);
+    }
+
+    // aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
+        return at::_ops::linalg_solve_ex_out::redispatch(dispatchKeySet, A, B, left, check_errors, result, info);
+    }
+
+    // aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
+    inline at::Tensor linalg_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left=true) {
+        return at::_ops::linalg_solve::redispatch(dispatchKeySet, A, B, left);
+    }
+
+    // aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left=true) {
+        return at::_ops::linalg_solve_out::redispatch(dispatchKeySet, A, B, left, out);
+    }
+
+    // aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
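// linalg_solve and linalg_solve_ex above differ only in error handling: with
// check_errors=False the _ex variant reports failures through the returned
// info tensor instead of throwing. A minimal sketch (illustrative, assuming
// the usual public at:: wrappers are available):
//
//   at::Tensor A = at::randn({3, 3});
//   at::Tensor B = at::randn({3, 2});
//   at::Tensor X      = at::linalg_solve(A, B);     // solves A X = B, throws on singular A
//   auto [X2, info]   = at::linalg_solve_ex(A, B);  // info == 0 signals success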
+ inline at::Tensor & linalg_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) { + return at::_ops::linalg_solve_out::redispatch(dispatchKeySet, A, B, left, out); + } + + // aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor + inline at::Tensor linalg_tensorinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind=2) { + return at::_ops::linalg_tensorinv::redispatch(dispatchKeySet, self, ind); + } + + // aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_tensorinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t ind=2) { + return at::_ops::linalg_tensorinv_out::redispatch(dispatchKeySet, self, ind, out); + } + + // aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_tensorinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind, at::Tensor & out) { + return at::_ops::linalg_tensorinv_out::redispatch(dispatchKeySet, self, ind, out); + } + + // aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor + inline at::Tensor linalg_tensorsolve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=c10::nullopt) { + return at::_ops::linalg_tensorsolve::redispatch(dispatchKeySet, self, other, dims); + } + + // aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_tensorsolve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=c10::nullopt) { + return at::_ops::linalg_tensorsolve_out::redispatch(dispatchKeySet, self, other, dims, out); + } + + // aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_tensorsolve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) { + return at::_ops::linalg_tensorsolve_out::redispatch(dispatchKeySet, self, other, dims, out); + } + + // aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R) + inline ::std::tuple linalg_qr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode="reduced") { + return at::_ops::linalg_qr::redispatch(dispatchKeySet, A, mode); + } + + // aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) + inline ::std::tuple linalg_qr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced") { + return at::_ops::linalg_qr_out::redispatch(dispatchKeySet, A, mode, Q, R); + } + + // aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) 
R) + inline ::std::tuple linalg_qr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) { + return at::_ops::linalg_qr_out::redispatch(dispatchKeySet, A, mode, Q, R); + } + + // aten::linalg_matrix_power(Tensor self, int n) -> Tensor + inline at::Tensor linalg_matrix_power(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n) { + return at::_ops::linalg_matrix_power::redispatch(dispatchKeySet, self, n); + } + + // aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t n) { + return at::_ops::linalg_matrix_power_out::redispatch(dispatchKeySet, self, n, out); + } + + // aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) { + return at::_ops::linalg_matrix_power_out::redispatch(dispatchKeySet, self, n, out); + } + + // aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor + inline at::Tensor linalg_matrix_rank(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & atol={}, const c10::optional & rtol={}, bool hermitian=false) { + return at::_ops::linalg_matrix_rank_atol_rtol_tensor::redispatch(dispatchKeySet, input, atol, rtol, hermitian); + } + + // aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_rank_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const c10::optional & atol={}, const c10::optional & rtol={}, bool hermitian=false) { + return at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::redispatch(dispatchKeySet, input, atol, rtol, hermitian, out); + } + + // aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_rank_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & atol, const c10::optional & rtol, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::redispatch(dispatchKeySet, input, atol, rtol, hermitian, out); + } + + // aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor + inline at::Tensor linalg_matrix_rank(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian=false) { + return at::_ops::linalg_matrix_rank_atol_rtol_float::redispatch(dispatchKeySet, self, atol, rtol, hermitian); + } + + // aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_rank_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian=false) { + return at::_ops::linalg_matrix_rank_atol_rtol_float_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out); + } + + // aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? 
rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_rank_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_matrix_rank_atol_rtol_float_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out); + } + + // aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor + inline at::Tensor linalg_matrix_rank(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian=false) { + return at::_ops::linalg_matrix_rank::redispatch(dispatchKeySet, self, tol, hermitian); + } + + // aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_rank_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double tol, bool hermitian=false) { + return at::_ops::linalg_matrix_rank_out::redispatch(dispatchKeySet, self, tol, hermitian, out); + } + + // aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_rank_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_matrix_rank_out::redispatch(dispatchKeySet, self, tol, hermitian, out); + } + + // aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor + inline at::Tensor linalg_matrix_rank(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian=false) { + return at::_ops::linalg_matrix_rank_tol_tensor::redispatch(dispatchKeySet, input, tol, hermitian); + } + + // aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_rank_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & tol, bool hermitian=false) { + return at::_ops::linalg_matrix_rank_out_tol_tensor::redispatch(dispatchKeySet, input, tol, hermitian, out); + } + + // aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_rank_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_matrix_rank_out_tol_tensor::redispatch(dispatchKeySet, input, tol, hermitian, out); + } + + // aten::linalg_multi_dot(Tensor[] tensors) -> Tensor + inline at::Tensor linalg_multi_dot(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::linalg_multi_dot::redispatch(dispatchKeySet, tensors); + } + + // aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_multi_dot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::linalg_multi_dot_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_multi_dot_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::linalg_multi_dot_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::nested_to_padded_tensor(Tensor self, float padding, int[]? 
output_size=None) -> Tensor + inline at::Tensor nested_to_padded_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) { + return at::_ops::nested_to_padded_tensor::redispatch(dispatchKeySet, self, padding, output_size); + } + + // aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor + inline at::Tensor _test_serialization_subcmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_test_serialization_subcmul::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor + inline at::Tensor _test_parallel_materialize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_parallel, bool skip_first=false) { + return at::_ops::_test_parallel_materialize::redispatch(dispatchKeySet, self, num_parallel, skip_first); + } + + // aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor + inline at::Tensor _test_optional_intlist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends) { + return at::_ops::_test_optional_intlist::redispatch(dispatchKeySet, values, addends); + } + + // aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor + inline at::Tensor _test_optional_filled_intlist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends) { + return at::_ops::_test_optional_filled_intlist::redispatch(dispatchKeySet, values, addends); + } + + // aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor + inline at::Tensor _test_optional_floatlist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, c10::optional> addends) { + return at::_ops::_test_optional_floatlist::redispatch(dispatchKeySet, values, addends); + } + + // aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor + inline at::Tensor _test_string_default(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\") { + return at::_ops::_test_string_default::redispatch(dispatchKeySet, dummy, a, b); + } + + // aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor + inline at::Tensor _test_ambiguous_defaults(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a=1, int64_t b=1) { + return at::_ops::_test_ambiguous_defaults_a::redispatch(dispatchKeySet, dummy, a, b); + } + + // aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor + inline at::Tensor _test_ambiguous_defaults(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, c10::string_view b) { + return at::_ops::_test_ambiguous_defaults_b::redispatch(dispatchKeySet, dummy, a, b); + } + + // aten::_test_warn_in_autograd(Tensor self) -> Tensor + inline at::Tensor _test_warn_in_autograd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_test_warn_in_autograd::redispatch(dispatchKeySet, self); + } + + // aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor + inline at::Tensor _test_autograd_multiple_dispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::redispatch(dispatchKeySet, self); + } + + // aten::_test_autograd_multiple_dispatch.ntonly(Tensor 
self, bool b) -> Tensor + inline at::Tensor _test_autograd_multiple_dispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool b) { + return at::_ops::_test_autograd_multiple_dispatch_ntonly::redispatch(dispatchKeySet, self, b); + } + + // aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a) + inline at::Tensor _test_autograd_multiple_dispatch_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_test_autograd_multiple_dispatch_view::redispatch(dispatchKeySet, self); + } + + // aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor + inline at::Tensor _test_autograd_multiple_dispatch_view_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_test_autograd_multiple_dispatch_view_copy::redispatch(dispatchKeySet, self); + } + + // aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor + inline at::Tensor segment_reduce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths={}, const c10::optional & indices={}, const c10::optional & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional & initial=c10::nullopt) { + return at::_ops::segment_reduce::redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial); + } + + // aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor + inline at::Tensor _segment_reduce_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths={}, const c10::optional & offsets={}, int64_t axis=0, const c10::optional & initial=c10::nullopt) { + return at::_ops::_segment_reduce_backward::redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial); + } + + // aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor + inline at::Tensor pad_sequence(c10::DispatchKeySet dispatchKeySet, at::TensorList sequences, bool batch_first=false, double padding_value=0.0) { + return at::_ops::pad_sequence::redispatch(dispatchKeySet, sequences, batch_first, padding_value); + } + + // aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor + inline at::Tensor flatten_dense_tensors(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::flatten_dense_tensors::redispatch(dispatchKeySet, tensors); + } + + // aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[] + inline ::std::vector unflatten_dense_tensors(c10::DispatchKeySet dispatchKeySet, const at::Tensor & flat, at::TensorList tensors) { + return at::_ops::unflatten_dense_tensors::redispatch(dispatchKeySet, flat, tensors); + } + + // aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
+ + // aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _nested_tensor_from_tensor_list(c10::DispatchKeySet dispatchKeySet, at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt) { + return at::_ops::_nested_tensor_from_tensor_list::redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory); + } + + // aten::_fw_primal_copy(Tensor self, int level) -> Tensor + inline at::Tensor _fw_primal_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) { + return at::_ops::_fw_primal_copy::redispatch(dispatchKeySet, self, level); + } + + // aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor + inline at::Tensor _make_dual_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) { + return at::_ops::_make_dual_copy::redispatch(dispatchKeySet, primal, tangent, level); + } + + // aten::view_as_real_copy(Tensor self) -> Tensor + inline at::Tensor view_as_real_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::view_as_real_copy::redispatch(dispatchKeySet, self); + } + + // aten::view_as_complex_copy(Tensor self) -> Tensor + inline at::Tensor view_as_complex_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::view_as_complex_copy::redispatch(dispatchKeySet, self); + } + + // aten::_conj_copy(Tensor self) -> Tensor + inline at::Tensor _conj_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_conj_copy::redispatch(dispatchKeySet, self); + } + + // aten::_neg_view_copy(Tensor self) -> Tensor + inline at::Tensor _neg_view_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_neg_view_copy::redispatch(dispatchKeySet, self); + } + + // aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor + inline at::Tensor as_strided_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) { + return at::_ops::as_strided_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); + }
+ + // aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor + inline at::Tensor as_strided_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) { + return at::_ops::as_strided_copy::redispatch(dispatchKeySet, self, size, stride, storage_offset); + } + + // aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor + inline at::Tensor _sparse_broadcast_to_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_sparse_broadcast_to_copy::redispatch(dispatchKeySet, self, size); + } + + // aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor + inline at::Tensor diagonal_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) { + return at::_ops::diagonal_copy::redispatch(dispatchKeySet, self, offset, dim1, dim2); + } + + // aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor + inline at::Tensor expand_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, bool implicit=false) { + return at::_ops::expand_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), implicit); + } + + // aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor + inline at::Tensor expand_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) { + return at::_ops::expand_copy::redispatch(dispatchKeySet, self, size, implicit); + } + + // aten::permute_copy(Tensor self, int[] dims) -> Tensor + inline at::Tensor permute_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::permute_copy::redispatch(dispatchKeySet, self, dims); + } + + // aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor + inline at::Tensor _reshape_alias_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) { + return at::_ops::_reshape_alias_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); + } + + // aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor + inline at::Tensor _reshape_alias_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + return at::_ops::_reshape_alias_copy::redispatch(dispatchKeySet, self, size, stride); + } + + // aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor + inline at::Tensor select_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t index) { + return at::_ops::select_copy_int::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor + inline at::Tensor select_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index) { + return at::_ops::select_copy_int::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::detach_copy(Tensor self) -> Tensor + inline at::Tensor detach_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::detach_copy::redispatch(dispatchKeySet, self); + }
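Most of the shape-taking ops above come in the pair seen here: a convenience overload over concrete at::IntArrayRef that converts through c10::fromIntArrayRefSlow, and a *_symint overload that forwards c10::SymInt shapes untouched, which matters under symbolic shape tracing. A small sketch against the public at:: wrappers; the tensor contents and shapes are illustrative:

#include <ATen/ATen.h>

void copy_variant_example() {
  at::Tensor t = at::arange(16, at::kFloat).reshape({4, 4});

  // Concrete-integer overload: sizes and strides given as ordinary int64_t lists.
  at::Tensor a = at::as_strided_copy(t, /*size=*/{2, 2}, /*stride=*/{4, 1});

  // *_copy ops materialize their result instead of returning a view.
  at::Tensor row = at::select_copy(t, /*dim=*/0, /*index=*/1);
  at::Tensor tt  = at::permute_copy(t, {1, 0});
}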
+ + // aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor + inline at::Tensor slice_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) { + return at::_ops::slice_copy_Tensor::redispatch(dispatchKeySet, self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step); + } + + // aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor + inline at::Tensor slice_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) { + return at::_ops::slice_copy_Tensor::redispatch(dispatchKeySet, self, dim, start, end, step); + } + + // aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] + inline ::std::vector<at::Tensor> split_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim=0) { + return at::_ops::split_copy_Tensor::redispatch(dispatchKeySet, self, split_size, dim); + } + + // aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] + inline ::std::vector<at::Tensor> split_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) { + return at::_ops::split_copy_Tensor::redispatch(dispatchKeySet, self, split_size, dim); + } + + // aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] + inline ::std::vector<at::Tensor> split_with_sizes_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(split_sizes), dim); + } + + // aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] + inline ::std::vector<at::Tensor> split_with_sizes_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy::redispatch(dispatchKeySet, self, split_sizes, dim); + } + + // aten::squeeze_copy(Tensor self) -> Tensor + inline at::Tensor squeeze_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::squeeze_copy::redispatch(dispatchKeySet, self); + } + + // aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor + inline at::Tensor squeeze_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::squeeze_copy_dim::redispatch(dispatchKeySet, self, dim); + } + + // aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor + inline at::Tensor squeeze_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::squeeze_copy_dims::redispatch(dispatchKeySet, self, dim); + } + + // aten::t_copy(Tensor self) -> Tensor + inline at::Tensor t_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::t_copy::redispatch(dispatchKeySet, self); + } + + // aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor + inline at::Tensor transpose_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::transpose_copy_int::redispatch(dispatchKeySet, self, dim0, dim1); + }
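As with the other *_copy entries, split_copy and split_with_sizes_copy return freshly allocated pieces rather than views into self. A hedged usage sketch via the public wrappers; the sizes and slice bounds are illustrative:

#include <ATen/ATen.h>
#include <vector>

void split_copy_example() {
  at::Tensor t = at::arange(10, at::kFloat);

  // Three chunks of sizes 2, 3 and 5 along dim 0; each chunk owns its own storage.
  std::vector<at::Tensor> parts = at::split_with_sizes_copy(t, {2, 3, 5}, /*dim=*/0);

  // slice_copy mirrors ordinary slicing but materializes the result.
  at::Tensor head = at::slice_copy(t, /*dim=*/0, /*start=*/0, /*end=*/4);
}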
+ + // aten::unsqueeze_copy(Tensor self, int dim) -> Tensor + inline at::Tensor unsqueeze_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::unsqueeze_copy::redispatch(dispatchKeySet, self, dim); + } + + // aten::_indices_copy(Tensor self) -> Tensor + inline at::Tensor _indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_indices_copy::redispatch(dispatchKeySet, self); + } + + // aten::_values_copy(Tensor self) -> Tensor + inline at::Tensor _values_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_values_copy::redispatch(dispatchKeySet, self); + } + + // aten::indices_copy(Tensor self) -> Tensor + inline at::Tensor indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::indices_copy::redispatch(dispatchKeySet, self); + } + + // aten::values_copy(Tensor self) -> Tensor + inline at::Tensor values_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::values_copy::redispatch(dispatchKeySet, self); + } + + // aten::crow_indices_copy(Tensor self) -> Tensor + inline at::Tensor crow_indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::crow_indices_copy::redispatch(dispatchKeySet, self); + } + + // aten::col_indices_copy(Tensor self) -> Tensor + inline at::Tensor col_indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::col_indices_copy::redispatch(dispatchKeySet, self); + } + + // aten::ccol_indices_copy(Tensor self) -> Tensor + inline at::Tensor ccol_indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::ccol_indices_copy::redispatch(dispatchKeySet, self); + } + + // aten::row_indices_copy(Tensor self) -> Tensor + inline at::Tensor row_indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::row_indices_copy::redispatch(dispatchKeySet, self); + } + + // aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[] + inline ::std::vector<at::Tensor> unbind_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0) { + return at::_ops::unbind_copy_int::redispatch(dispatchKeySet, self, dim); + } + + // aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () + inline void unbind_copy_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, int64_t dim=0) { + return at::_ops::unbind_copy_int_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () + inline void unbind_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::TensorList out) { + return at::_ops::unbind_copy_int_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + inline void split_copy_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) { + return at::_ops::split_copy_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out); + } + + // aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + inline void split_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) { + return at::_ops::split_copy_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out); + } + + //
aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + inline void split_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) { + return at::_ops::split_copy_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out); + } + + // aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + inline void split_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { + return at::_ops::split_copy_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out); + } + + // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + inline void split_with_sizes_copy_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(split_sizes), dim, out); + } + + // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + inline void split_with_sizes_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::split_with_sizes_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(split_sizes), dim, out); + } + + // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + inline void split_with_sizes_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy_out::redispatch(dispatchKeySet, self, split_sizes, dim, out); + } + + // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + inline void split_with_sizes_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::split_with_sizes_copy_out::redispatch(dispatchKeySet, self, split_sizes, dim, out); + } + + // aten::view_copy(Tensor self, SymInt[] size) -> Tensor + inline at::Tensor view_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::view_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size)); + } + + // aten::view_copy(Tensor self, SymInt[] size) -> Tensor + inline at::Tensor view_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::view_copy::redispatch(dispatchKeySet, self, size); + } + + // aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor + inline at::Tensor view_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) { + return at::_ops::view_copy_dtype::redispatch(dispatchKeySet, self, dtype); + } + + // aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor + inline at::Tensor unfold_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + return at::_ops::unfold_copy::redispatch(dispatchKeySet, self, dimension, size, step); + } + + // aten::alias_copy(Tensor self) -> Tensor + inline at::Tensor 
alias_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::alias_copy::redispatch(dispatchKeySet, self); + } + + // aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor + inline at::Tensor to_padded_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) { + return at::_ops::to_padded_tensor::redispatch(dispatchKeySet, self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt); + } + + // aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor + inline at::Tensor to_padded_tensor_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt) { + return at::_ops::to_padded_tensor::redispatch(dispatchKeySet, self, padding, output_size); + } + + // aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor + inline at::Tensor _nested_tensor_softmax_with_shape(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & query) { + return at::_ops::_nested_tensor_softmax_with_shape::redispatch(dispatchKeySet, self, query); + } + + // aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor + inline at::Tensor _transformer_encoder_layer_fwd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask={}, c10::optional mask_type=c10::nullopt) { + return at::_ops::_transformer_encoder_layer_fwd::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type); + } + + // aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? 
mask_type=None) -> (Tensor, Tensor) + inline ::std::tuple _native_multi_head_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional mask_type=c10::nullopt) { + return at::_ops::_native_multi_head_attention::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type); + } + + // aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> Tensor + inline at::Tensor scaled_dot_product_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt) { + return at::_ops::scaled_dot_product_attention::redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, scale); + } + + // aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> int + inline int64_t _fused_sdp_choice(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt) { + return at::_ops::_fused_sdp_choice::redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, scale); + } + + // aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None) -> (Tensor, Tensor) + inline ::std::tuple _scaled_dot_product_attention_math(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, const c10::optional & dropout_mask={}, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_attention_math::redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale); + } + + // aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) + inline ::std::tuple _scaled_dot_product_flash_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention::redispatch(dispatchKeySet, query, key, value, dropout_p, is_causal, return_debug_mask, scale); + } + + // aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? 
scale=None) -> (Tensor output, Tensor logsumexp) + inline ::std::tuple _scaled_dot_product_flash_attention_for_cpu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p=0.0, bool is_causal=false, const c10::optional & attn_mask={}, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention_for_cpu::redispatch(dispatchKeySet, query, key, value, dropout_p, is_causal, attn_mask, scale); + } + + // aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) + inline ::std::tuple _scaled_dot_product_flash_attention_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention_backward::redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } + + // aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) + inline ::std::tuple _scaled_dot_product_flash_attention_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention_backward::redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } + + // aten::_scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? 
scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) + inline ::std::tuple _scaled_dot_product_flash_attention_for_cpu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const c10::optional & attn_mask={}, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention_for_cpu_backward::redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale); + } + + // aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset) + inline ::std::tuple _scaled_dot_product_efficient_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_bias, bool compute_log_sumexp, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_efficient_attention::redispatch(dispatchKeySet, query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale); + } + + // aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple _scaled_dot_product_efficient_attention_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array grad_input_mask, bool is_causal=false, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_efficient_attention_backward::redispatch(dispatchKeySet, grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale); + } + + // aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset) + inline ::std::tuple _scaled_dot_product_cudnn_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_cudnn_attention::redispatch(dispatchKeySet, query, key, value, dropout_p, is_causal, return_debug_mask, scale); + } + + // aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? 
scale=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) + inline ::std::tuple _flash_attention_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & cum_seq_q, const c10::optional & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale=c10::nullopt) { + return at::_ops::_flash_attention_forward::redispatch(dispatchKeySet, query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale); + } + + // aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask) + inline ::std::tuple _flash_attention_forward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & cum_seq_q, const c10::optional & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale=c10::nullopt) { + return at::_ops::_flash_attention_forward::redispatch(dispatchKeySet, query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale); + } + + // aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _flash_attention_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale=c10::nullopt) { + return at::_ops::_flash_attention_backward::redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } + + // aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? 
scale=None) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _flash_attention_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale=c10::nullopt) { + return at::_ops::_flash_attention_backward::redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } + + // aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, int? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? causal_diagonal=None, Tensor? seqlen_k=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k) + inline ::std::tuple _efficient_attention_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & bias, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, c10::optional max_seqlen_q, c10::optional max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, c10::optional scale=c10::nullopt, const c10::optional & causal_diagonal={}, const c10::optional & seqlen_k={}) { + return at::_ops::_efficient_attention_forward::redispatch(dispatchKeySet, query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp, scale, causal_diagonal, seqlen_k); + } + + // aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple _efficient_attention_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & bias, const at::Tensor & out, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, int64_t max_seqlen_q, int64_t max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional scale=c10::nullopt, c10::optional num_splits_key=c10::nullopt) { + return at::_ops::_efficient_attention_backward::redispatch(dispatchKeySet, grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key); + } + + // aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? 
cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple _efficient_attention_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & bias, const at::Tensor & out, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional scale=c10::nullopt, c10::optional num_splits_key=c10::nullopt) { + return at::_ops::_efficient_attention_backward::redispatch(dispatchKeySet, grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key); + } + + // aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor + inline at::Tensor _triton_scaled_dot_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0) { + return at::_ops::_triton_scaled_dot_attention::redispatch(dispatchKeySet, q, k, v, dropout_p); + } + + // aten::_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!) + inline at::Tensor & _fill_mem_eff_dropout_mask_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double dropout_p, int64_t seed, int64_t offset) { + return at::_ops::_fill_mem_eff_dropout_mask_::redispatch(dispatchKeySet, self, dropout_p, seed, offset); + } + + // aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor + inline at::Tensor _triton_multi_head_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask={}) { + return at::_ops::_triton_multi_head_attention::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask); + } + + // aten::special_airy_ai(Tensor x) -> Tensor + inline at::Tensor special_airy_ai(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) { + return at::_ops::special_airy_ai::redispatch(dispatchKeySet, x); + } + + // aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_airy_ai_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x) { + return at::_ops::special_airy_ai_out::redispatch(dispatchKeySet, x, out); + } + + // aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_airy_ai_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { + return at::_ops::special_airy_ai_out::redispatch(dispatchKeySet, x, out); + } + + // aten::special_bessel_j0(Tensor self) -> Tensor + inline at::Tensor special_bessel_j0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_bessel_j0::redispatch(dispatchKeySet, self); + } + + // aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_bessel_j0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_bessel_j0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_bessel_j0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_bessel_j0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_bessel_j1(Tensor self) -> Tensor + inline at::Tensor special_bessel_j1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_bessel_j1::redispatch(dispatchKeySet, self); + } + + // aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_bessel_j1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_bessel_j1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_bessel_j1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_bessel_j1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_bessel_y0(Tensor self) -> Tensor + inline at::Tensor special_bessel_y0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_bessel_y0::redispatch(dispatchKeySet, self); + } + + // aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_bessel_y0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_bessel_y0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_bessel_y0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_bessel_y0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_bessel_y1(Tensor self) -> Tensor + inline at::Tensor special_bessel_y1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_bessel_y1::redispatch(dispatchKeySet, self); + } + + // aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_bessel_y1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_bessel_y1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_bessel_y1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_bessel_y1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_t::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_t_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_chebyshev_polynomial_t_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_t_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_t_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_t_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_t_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_chebyshev_polynomial_t_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_t_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_u::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_u_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_chebyshev_polynomial_u_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_u_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_u_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_u_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_u_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_chebyshev_polynomial_u_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_u_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_v::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_v_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_chebyshev_polynomial_v_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_v_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_v_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_v_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_v_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_chebyshev_polynomial_v_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_v_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_w::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_w_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_chebyshev_polynomial_w_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_w_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_w_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_chebyshev_polynomial_w_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_w_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_chebyshev_polynomial_w_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_chebyshev_polynomial_w_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_hermite_polynomial_h(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_h::redispatch(dispatchKeySet, x, n); + } + + // aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_hermite_polynomial_h(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_h_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_hermite_polynomial_h(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_hermite_polynomial_h_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_h_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_h_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_h_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_h_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_h_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_h_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_h_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_h_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_h_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_hermite_polynomial_h_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_hermite_polynomial_h_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_h_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_hermite_polynomial_he(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_he::redispatch(dispatchKeySet, x, n); + } + + // aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_hermite_polynomial_he(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_he_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_hermite_polynomial_he(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_hermite_polynomial_he_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_he_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_he_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_he_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_he_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_he_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_he_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_he_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_he_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_hermite_polynomial_he_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_hermite_polynomial_he_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_hermite_polynomial_he_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_he_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_laguerre_polynomial_l(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_laguerre_polynomial_l::redispatch(dispatchKeySet, x, n); + } + + // aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_laguerre_polynomial_l(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_laguerre_polynomial_l_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_laguerre_polynomial_l(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_laguerre_polynomial_l_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_laguerre_polynomial_l_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_laguerre_polynomial_l_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_laguerre_polynomial_l_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_laguerre_polynomial_l_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_laguerre_polynomial_l_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_laguerre_polynomial_l_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_laguerre_polynomial_l_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_laguerre_polynomial_l_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_laguerre_polynomial_l_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_laguerre_polynomial_l_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_laguerre_polynomial_l_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_laguerre_polynomial_l_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_legendre_polynomial_p(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_legendre_polynomial_p::redispatch(dispatchKeySet, x, n); + } + + // aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_legendre_polynomial_p(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_legendre_polynomial_p_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_legendre_polynomial_p(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_legendre_polynomial_p_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_legendre_polynomial_p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_legendre_polynomial_p_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_legendre_polynomial_p_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_legendre_polynomial_p_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_legendre_polynomial_p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_legendre_polynomial_p_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_legendre_polynomial_p_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_legendre_polynomial_p_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_legendre_polynomial_p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_legendre_polynomial_p_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_legendre_polynomial_p_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_legendre_polynomial_p_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_modified_bessel_i0(Tensor self) -> Tensor + inline at::Tensor special_modified_bessel_i0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_modified_bessel_i0::redispatch(dispatchKeySet, self); + } + + // aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_modified_bessel_i0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_modified_bessel_i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_modified_bessel_i0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_modified_bessel_i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_modified_bessel_i1(Tensor self) -> Tensor + inline at::Tensor special_modified_bessel_i1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_modified_bessel_i1::redispatch(dispatchKeySet, self); + } + + // aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_modified_bessel_i1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_modified_bessel_i1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_modified_bessel_i1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_modified_bessel_i1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_modified_bessel_k0(Tensor self) -> Tensor + inline at::Tensor special_modified_bessel_k0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_modified_bessel_k0::redispatch(dispatchKeySet, self); + } + + // aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_modified_bessel_k0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_modified_bessel_k0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_modified_bessel_k0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_modified_bessel_k0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_modified_bessel_k1(Tensor self) -> Tensor + inline at::Tensor special_modified_bessel_k1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_modified_bessel_k1::redispatch(dispatchKeySet, self); + } + + // aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_modified_bessel_k1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_modified_bessel_k1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_modified_bessel_k1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_modified_bessel_k1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor + inline at::Tensor special_scaled_modified_bessel_k0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) { + return at::_ops::special_scaled_modified_bessel_k0::redispatch(dispatchKeySet, x); + } + + // aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_scaled_modified_bessel_k0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x) { + return at::_ops::special_scaled_modified_bessel_k0_out::redispatch(dispatchKeySet, x, out); + } + + // aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_scaled_modified_bessel_k0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { + return at::_ops::special_scaled_modified_bessel_k0_out::redispatch(dispatchKeySet, x, out); + } + + // aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor + inline at::Tensor special_scaled_modified_bessel_k1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) { + return at::_ops::special_scaled_modified_bessel_k1::redispatch(dispatchKeySet, x); + } + + // aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_scaled_modified_bessel_k1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x) { + return at::_ops::special_scaled_modified_bessel_k1_out::redispatch(dispatchKeySet, x, out); + } + + // aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_scaled_modified_bessel_k1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { + return at::_ops::special_scaled_modified_bessel_k1_out::redispatch(dispatchKeySet, x, out); + } + + // aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
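// ---------------------------------------------------------------------------
// Editorial aside (not part of the generated header): the scaled modified
// Bessel wrappers above compute exp(x) * K0(x) and exp(x) * K1(x), which stay
// finite for large x where K0/K1 underflow. A sketch against the public at::
// API (assumed available via ATen/ATen.h; illustrative only):
#include <ATen/ATen.h>

inline void scaled_bessel_sketch() {
  at::Tensor x = at::arange(1.0, 6.0);
  at::Tensor k0        = at::special_modified_bessel_k0(x);
  at::Tensor k0_scaled = at::special_scaled_modified_bessel_k0(x);
  // For moderate x, k0_scaled should match exp(x) * k0 up to rounding.
  at::Tensor check = at::exp(x) * k0;
  (void)k0_scaled; (void)check;
}
// ---------------------------------------------------------------------------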
+ inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_t_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_u_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_w_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_spherical_bessel_j0(Tensor x) -> Tensor + inline at::Tensor special_spherical_bessel_j0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) { + return at::_ops::special_spherical_bessel_j0::redispatch(dispatchKeySet, x); + } + + // aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_spherical_bessel_j0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x) { + return at::_ops::special_spherical_bessel_j0_out::redispatch(dispatchKeySet, x, out); + } + + // aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) 
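// ---------------------------------------------------------------------------
// Editorial aside (not part of the generated header): spherical Bessel
// j0(x) = sin(x)/x (with j0(0) = 1), so the wrapper above can be sanity-checked
// against the closed form. Public at:: API assumed; illustrative only.
#include <ATen/ATen.h>

inline void spherical_bessel_sketch() {
  at::Tensor x = at::linspace(0.1, 10.0, 100);  // avoid x == 0 for the reference
  at::Tensor j0 = at::special_spherical_bessel_j0(x);
  at::Tensor ref = at::sin(x) / x;
  bool ok = at::allclose(j0, ref, /*rtol=*/1e-5, /*atol=*/1e-7);
  (void)ok;
}
// ---------------------------------------------------------------------------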
+ inline at::Tensor & special_spherical_bessel_j0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { + return at::_ops::special_spherical_bessel_j0_out::redispatch(dispatchKeySet, x, out); + } + + // aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor + inline at::Tensor _foobar(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) { + return at::_ops::_foobar::redispatch(dispatchKeySet, self, arg1, arg2, arg3); + } + + // aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + inline void _fused_adam_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_adam_::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + inline void _fused_adam_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_adam__tensor_lr::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor?
found_inf=None) -> () + inline void _fused_adamw_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_adamw_::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + inline void _fused_adamw_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_adamw__tensor_lr::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + inline void _fused_sgd_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_sgd_::redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + + // aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor?
found_inf=None) -> () + inline void _fused_sgd_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_sgd__tensor_lr::redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + + // aten::_propagate_xla_data(Tensor input, Tensor output) -> () + inline void _propagate_xla_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & output) { + return at::_ops::_propagate_xla_data::redispatch(dispatchKeySet, input, output); + } + + // aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _new_zeros_with_same_feature_meta_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) { + return at::_ops::_new_zeros_with_same_feature_meta_out::redispatch(dispatchKeySet, self, other, self_num_batch_dims, out); + } + + // aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _new_zeros_with_same_feature_meta_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) { + return at::_ops::_new_zeros_with_same_feature_meta_out::redispatch(dispatchKeySet, self, other, self_num_batch_dims, out); + } + + // aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + return at::_ops::_cudnn_ctc_loss_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1); + } + + // aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_cudnn_ctc_loss_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1); + } + + // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
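// ---------------------------------------------------------------------------
// Editorial aside (not part of the generated header): the _fused_* optimizer
// entries above are in-place, list-based ops that update every parameter in a
// single fused step. A minimal call through the public at::_fused_sgd_ wrapper
// (assumed to be generated from the schema shown above; illustrative only):
#include <ATen/ATen.h>
#include <vector>

inline void fused_sgd_sketch() {
  std::vector<at::Tensor> params = {at::randn({4}), at::randn({4})};
  std::vector<at::Tensor> grads  = {at::randn({4}), at::randn({4})};
  std::vector<at::Tensor> bufs   = {at::zeros({4}), at::zeros({4})};
  at::_fused_sgd_(params, grads, bufs,
                  /*weight_decay=*/0.0, /*momentum=*/0.9, /*lr=*/0.1,
                  /*dampening=*/0.0, /*nesterov=*/false, /*maximize=*/false,
                  /*is_first_step=*/true);
  // grad_scale / found_inf keep their empty-optional defaults (see the schema).
}
// ---------------------------------------------------------------------------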
+ inline at::Tensor & _cudnn_rnn_flatten_weight_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight_out::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } + + // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_rnn_flatten_weight_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { + return at::_ops::_cudnn_rnn_flatten_weight_out::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } + + // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_rnn_flatten_weight_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight_out::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } + + // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_rnn_flatten_weight_symint_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { + return at::_ops::_cudnn_rnn_flatten_weight_out::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } + + // aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) 
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) { + return at::_ops::_cudnn_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, out0, out1, out2, out3, out4); + } + + // aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { + return at::_ops::_cudnn_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, out0, out1, out2, out3, out4); + } + + // aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) { + return at::_ops::_cudnn_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4); + } + + // aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor?
cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { + return at::_ops::_cudnn_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4); + } + + // aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () + inline void _cudnn_rnn_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) { + return at::_ops::_cudnn_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + + // aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2, Tensor(d!)[] out3) -> () + inline void _cudnn_rnn_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { + return at::_ops::_cudnn_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + + // aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () + inline void _cudnn_rnn_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) { + return at::_ops::_cudnn_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + + // aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2, Tensor(d!)[] out3) -> () + inline void _cudnn_rnn_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { + return at::_ops::_cudnn_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + + // aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_init_dropout_state_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, double dropout, bool train, int64_t dropout_seed) { + return at::_ops::_cudnn_init_dropout_state_out::redispatch(dispatchKeySet, dropout, train, dropout_seed, out); + } + + // aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_init_dropout_state_outf(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::Tensor & out) { + return at::_ops::_cudnn_init_dropout_state_out::redispatch(dispatchKeySet, dropout, train, dropout_seed, out); + } + + // aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::_fused_dropout_out::redispatch(dispatchKeySet, self, p, generator, out0, out1); + } + + // aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_fused_dropout_out::redispatch(dispatchKeySet, self, p, generator, out0, out1); + } + + // aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _masked_scale_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, double scale) { + return at::_ops::_masked_scale_out::redispatch(dispatchKeySet, self, mask, scale, out); + } + + // aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _masked_scale_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) { + return at::_ops::_masked_scale_out::redispatch(dispatchKeySet, self, mask, scale, out); + } + + // aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!)
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double p, c10::optional<bool> train) { + return at::_ops::native_dropout_out::redispatch(dispatchKeySet, input, p, train, out0, out1); + } + + // aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, c10::optional<bool> train, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::native_dropout_out::redispatch(dispatchKeySet, input, p, train, out0, out1); + } + + // aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_dropout_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & mask, double scale) { + return at::_ops::native_dropout_backward_out::redispatch(dispatchKeySet, grad_output, mask, scale, out); + } + + // aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_dropout_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) { + return at::_ops::native_dropout_backward_out::redispatch(dispatchKeySet, grad_output, mask, scale, out); + } + + // aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _conj_physical_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_conj_physical_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _conj_physical_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_conj_physical_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _add_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _add_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::_add_relu_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & add_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::add_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
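// ---------------------------------------------------------------------------
// Editorial aside (not part of the generated header): native_dropout returns
// both the dropped-out tensor and the keep-mask, and native_dropout_backward
// rescales the incoming gradient by the same 1/(1-p) factor. Sketch against
// the public at:: API (assumed; illustrative only):
#include <ATen/ATen.h>

inline void native_dropout_sketch() {
  at::Tensor input = at::randn({2, 3});
  double p = 0.5;
  auto [output, mask] = at::native_dropout(input, p, /*train=*/true);
  at::Tensor grad_out = at::ones_like(output);
  at::Tensor grad_in =
      at::native_dropout_backward(grad_out, mask, /*scale=*/1.0 / (1.0 - p));
  (void)grad_in;
}
// ---------------------------------------------------------------------------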
+ inline at::Tensor & add_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::add_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & affine_grid_generator_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator_out::redispatch(dispatchKeySet, theta, c10::fromIntArrayRefSlow(size), align_corners, out); + } + + // aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & affine_grid_generator_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) { + return at::_ops::affine_grid_generator_out::redispatch(dispatchKeySet, theta, c10::fromIntArrayRefSlow(size), align_corners, out); + } + + // aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & affine_grid_generator_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator_out::redispatch(dispatchKeySet, theta, size, align_corners, out); + } + + // aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & affine_grid_generator_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out) { + return at::_ops::affine_grid_generator_out::redispatch(dispatchKeySet, theta, size, align_corners, out); + } + + // aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_functorch_fallback_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::_test_functorch_fallback_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_functorch_fallback_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::_test_functorch_fallback_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bartlett_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) { + return at::_ops::bartlett_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bartlett_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { + return at::_ops::bartlett_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & bartlett_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::bartlett_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bartlett_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::bartlett_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_batch_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) { + return at::_ops::quantized_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point, out); + } + + // aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_batch_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) { + return at::_ops::quantized_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point, out); + } + + // aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & p, c10::optional generator=c10::nullopt) { + return at::_ops::bernoulli_Tensor_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, c10::optional generator, at::Tensor & out) { + return at::_ops::bernoulli_Tensor_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor + inline at::Tensor bernoulli(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, c10::optional generator=c10::nullopt) { + return at::_ops::bernoulli_Tensor::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double p=0.5, c10::optional generator=c10::nullopt) { + return at::_ops::bernoulli_float_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & bernoulli_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional generator, at::Tensor & out) { + return at::_ops::bernoulli_float_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binary_cross_entropy_with_logits_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, const c10::optional & pos_weight={}, int64_t reduction=at::Reduction::Mean) { + return at::_ops::binary_cross_entropy_with_logits_out::redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction, out); + } + + // aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binary_cross_entropy_with_logits_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction, at::Tensor & out) { + return at::_ops::binary_cross_entropy_with_logits_out::redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction, out); + } + + // aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bincount_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & weights={}, int64_t minlength=0) { + return at::_ops::bincount_out::redispatch(dispatchKeySet, self, weights, minlength, out); + } + + // aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bincount_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & weights, int64_t minlength, at::Tensor & out) { + return at::_ops::bincount_out::redispatch(dispatchKeySet, self, weights, minlength, out); + } + + // aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & blackman_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) { + return at::_ops::blackman_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & blackman_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { + return at::_ops::blackman_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & blackman_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::blackman_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & blackman_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::blackman_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & block_diag_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::block_diag_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & block_diag_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::block_diag_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & constant_pad_nd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(pad), value, out); + } + + // aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & constant_pad_nd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) { + return at::_ops::constant_pad_nd_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(pad), value, out); + } + + // aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & constant_pad_nd_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd_out::redispatch(dispatchKeySet, self, pad, value, out); + } + + // aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & constant_pad_nd_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) { + return at::_ops::constant_pad_nd_out::redispatch(dispatchKeySet, self, pad, value, out); + } + + // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_out::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out); + } + + // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) { + return at::_ops::convolution_out::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out); + } + + // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + return at::_ops::convolution_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } + + // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) { + return at::_ops::convolution_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } + + // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple convolution_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask) { + return at::_ops::convolution_backward_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple convolution_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::convolution_backward_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple convolution_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::convolution_backward_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple convolution_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::convolution_backward_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & convolution_overrideable_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_overrideable_out::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out); + } + + // aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_overrideable_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) { + return at::_ops::convolution_overrideable_out::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out); + } + + // aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_overrideable_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + return at::_ops::convolution_overrideable_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } + + // aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_overrideable_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) { + return at::_ops::convolution_overrideable_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } + + // aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple convolution_backward_overrideable_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask) { + return at::_ops::convolution_backward_overrideable_out::redispatch(dispatchKeySet, grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple convolution_backward_overrideable_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::convolution_backward_overrideable_out::redispatch(dispatchKeySet, grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple convolution_backward_overrideable_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::convolution_backward_overrideable_out::redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple convolution_backward_overrideable_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::convolution_backward_overrideable_out::redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { + return at::_ops::_convolution_out::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out); + } + + // aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) { + return at::_ops::_convolution_out::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out); + } + + // aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
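+ // Sketch of the two size conventions (illustrative; `ks`, `input`, `weight` and
+ // `out` are assumed to be provided by the caller): the overloads above take
+ // concrete at::IntArrayRef / int64_t sizes and convert them with
+ // c10::fromIntArrayRefSlow before redispatching, while the `*_symint_*`
+ // overloads below accept c10::SymIntArrayRef / c10::SymInt and forward the
+ // (possibly symbolic) values unchanged, e.g.:
+ //
+ //   std::vector<c10::SymInt> ones  = {c10::SymInt(1), c10::SymInt(1)};
+ //   std::vector<c10::SymInt> zeros = {c10::SymInt(0), c10::SymInt(0)};
+ //   at::redispatch::_convolution_symint_out(
+ //       ks, out, input, weight, /*bias=*/{},
+ //       /*stride=*/ones, /*padding=*/zeros, /*dilation=*/ones,
+ //       /*transposed=*/false, /*output_padding=*/zeros, /*groups=*/c10::SymInt(1),
+ //       /*benchmark=*/false, /*deterministic=*/false,
+ //       /*cudnn_enabled=*/true, /*allow_tf32=*/true);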
+ inline at::Tensor & _convolution_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { + return at::_ops::_convolution_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out); + } + + // aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _convolution_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) { + return at::_ops::_convolution_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out); + } + + // aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & conv_tbc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) { + return at::_ops::conv_tbc_out::redispatch(dispatchKeySet, self, weight, bias, pad, out); + } + + // aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & conv_tbc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) { + return at::_ops::conv_tbc_out::redispatch(dispatchKeySet, self, weight, bias, pad, out); + } + + // aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) { + return at::_ops::copy_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _copy_from_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false) { + return at::_ops::_copy_from_out::redispatch(dispatchKeySet, self, dst, non_blocking, out); + } + + // aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _copy_from_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) { + return at::_ops::_copy_from_out::redispatch(dispatchKeySet, self, dst, non_blocking, out); + } + + // aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _copy_from_and_resize_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & dst) { + return at::_ops::_copy_from_and_resize_out::redispatch(dispatchKeySet, self, dst, out); + } + + // aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _copy_from_and_resize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) { + return at::_ops::_copy_from_and_resize_out::redispatch(dispatchKeySet, self, dst, out); + } + + // aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & count_nonzero_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::count_nonzero_dim_IntList_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & count_nonzero_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { + return at::_ops::count_nonzero_dim_IntList_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & count_nonzero_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional dim=c10::nullopt) { + return at::_ops::count_nonzero_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & count_nonzero_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim, at::Tensor & out) { + return at::_ops::count_nonzero_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_affine_grid_generator_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) { + return at::_ops::cudnn_affine_grid_generator_out::redispatch(dispatchKeySet, theta, N, C, H, W, out); + } + + // aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_affine_grid_generator_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) { + return at::_ops::cudnn_affine_grid_generator_out::redispatch(dispatchKeySet, theta, N, C, H, W, out); + } + + // aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & cudnn_affine_grid_generator_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
+ return at::_ops::cudnn_affine_grid_generator_backward_out::redispatch(dispatchKeySet, grad, N, C, H, W, out);
+ }
+
+ // aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cudnn_affine_grid_generator_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
+ return at::_ops::cudnn_affine_grid_generator_backward_out::redispatch(dispatchKeySet, grad, N, C, H, W, out);
+ }
+
+ // aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
+ return at::_ops::cudnn_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
+ }
+
+ // aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
+ return at::_ops::cudnn_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
+ }
+
+ // aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
+ return at::_ops::cudnn_batch_norm_backward_out::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
+ }
+
+ // aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::cudnn_batch_norm_backward_out::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
+ }
+
+ // aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cudnn_convolution_transpose_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
+ return at::_ops::cudnn_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32, out);
+ }
+
+ // aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cudnn_convolution_transpose_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
+ return at::_ops::cudnn_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, allow_tf32, out);
+ }
+
+ // aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cudnn_convolution_transpose_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out); + } + + // aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_transpose_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) { + return at::_ops::cudnn_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out); + } + + // aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mps_convolution_transpose_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::_mps_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mps_convolution_transpose_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { + return at::_ops::_mps_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _mps_convolution_transpose_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::_mps_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, out); + } + + // aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mps_convolution_transpose_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) { + return at::_ops::_mps_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, out); + } + + // aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple mps_convolution_transpose_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); + } + + // aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple mps_convolution_transpose_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); + } + + // aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple mps_convolution_transpose_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); + } + + // aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple mps_convolution_transpose_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); + } + + // aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::cudnn_convolution_relu_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { + return at::_ops::cudnn_convolution_relu_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_relu_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::cudnn_convolution_relu_out::redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups, out); + } + + // aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_relu_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) { + return at::_ops::cudnn_convolution_relu_out::redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups, out); + } + + // aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_add_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::cudnn_convolution_add_relu_out::redispatch(dispatchKeySet, self, weight, z, alpha, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_add_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { + return at::_ops::cudnn_convolution_add_relu_out::redispatch(dispatchKeySet, self, weight, z, alpha, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_convolution_add_relu_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::cudnn_convolution_add_relu_out::redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups, out); + } + + // aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
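+ // Note (illustrative sketch): the `Scalar? alpha` and `Tensor? bias` arguments
+ // of this op surface as c10::optional parameters, so a caller with no explicit
+ // scale or bias tensor can pass c10::nullopt (or `{}`). Assuming `ks`, `self`,
+ // `weight`, `z`, `stride`, `padding`, `dilation` and `out` are already set up:
+ //
+ //   at::redispatch::cudnn_convolution_add_relu_symint_outf(
+ //       ks, self, weight, z, /*alpha=*/c10::nullopt, /*bias=*/c10::nullopt,
+ //       stride, padding, dilation, /*groups=*/c10::SymInt(1), out);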
+ inline at::Tensor & cudnn_convolution_add_relu_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) { + return at::_ops::cudnn_convolution_add_relu_out::redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups, out); + } + + // aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_grid_sampler_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & grid) { + return at::_ops::cudnn_grid_sampler_out::redispatch(dispatchKeySet, self, grid, out); + } + + // aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cudnn_grid_sampler_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) { + return at::_ops::cudnn_grid_sampler_out::redispatch(dispatchKeySet, self, grid, out); + } + + // aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple cudnn_grid_sampler_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) { + return at::_ops::cudnn_grid_sampler_backward_out::redispatch(dispatchKeySet, self, grid, grad_output, out0, out1); + } + + // aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple cudnn_grid_sampler_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::cudnn_grid_sampler_backward_out::redispatch(dispatchKeySet, self, grid, grad_output, out0, out1); + } + + // aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _ctc_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false) { + return at::_ops::_ctc_loss_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1); + } + + // aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::_ctc_loss_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+    }
+
+    // aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false) {
+        return at::_ops::_ctc_loss_Tensor_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+    }
+
+    // aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::_ctc_loss_Tensor_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+    }
+
+    // aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _ctc_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) {
+        return at::_ops::_ctc_loss_backward_out::redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
+    }
+
+    // aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _ctc_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
+        return at::_ops::_ctc_loss_backward_out::redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
+    }
+
+    // aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
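For orientation, the _ctc_loss pair above returns the per-sample losses together with the log_alpha buffer that _ctc_loss_backward consumes. A minimal sketch of the user-facing entry point (the public at::ctc_loss wrapper, not the redispatch functions in this header); shapes and values are illustrative only:

#include <ATen/ATen.h>

int main() {
  int64_t T = 50, N = 2, C = 20;                                // time steps, batch, classes
  at::Tensor log_probs = at::randn({T, N, C}).log_softmax(2);   // CTC expects log-probabilities
  at::Tensor targets = at::randint(1, C, {N, 10}, at::kLong);   // labels avoid the blank index 0
  at::Tensor loss = at::ctc_loss(log_probs, targets,
                                 /*input_lengths=*/{T, T},
                                 /*target_lengths=*/{10, 10},
                                 /*blank=*/0, at::Reduction::Mean,
                                 /*zero_infinity=*/false);
  return 0;
}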
+ inline at::Tensor & diag_embed_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) { + return at::_ops::diag_embed_out::redispatch(dispatchKeySet, self, offset, dim1, dim2, out); + } + + // aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diag_embed_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { + return at::_ops::diag_embed_out::redispatch(dispatchKeySet, self, offset, dim1, dim2, out); + } + + // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diagonal_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { + return at::_ops::diagonal_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out); + } + + // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diagonal_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { + return at::_ops::diagonal_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out); + } + + // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diagonal_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { + return at::_ops::diagonal_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2, out); + } + + // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diagonal_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { + return at::_ops::diagonal_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2, out); + } + + // aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::div_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::div_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) 
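As a quick reference for the diag_embed schema above, a minimal sketch using the ordinary at::diag_embed entry point (values are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor v = at::tensor({1.0, 2.0, 3.0});
  at::Tensor m = at::diag_embed(v);                 // {3, 3} matrix with 1, 2, 3 on the main diagonal
  at::Tensor s = at::diag_embed(v, /*offset=*/1);   // {4, 4} matrix, values on the diagonal above the main one
  return 0;
}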
+    inline at::Tensor & div_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
+        return at::_ops::div_Scalar_mode_out::redispatch(dispatchKeySet, self, other, rounding_mode, out);
+    }
+
+    // aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & div_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
+        return at::_ops::div_Scalar_mode_out::redispatch(dispatchKeySet, self, other, rounding_mode, out);
+    }
+
+    // aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
+        return at::_ops::embedding_out::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
+    }
+
+    // aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
+        return at::_ops::embedding_out::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
+    }
+
+    // aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
+        return at::_ops::embedding_out::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
+    }
+
+    // aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
+        return at::_ops::embedding_out::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
+    }
+
+    // aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_dense_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
+        return at::_ops::embedding_dense_backward_out::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
+    }
+
+    // aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
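The str? rounding_mode in div.Scalar_mode_out selects between true division and the truncating/flooring variants. A small sketch with the public at::div overloads (assumed here to mirror the schema; the outputs noted in comments hold for these sample values):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::tensor({7.0, -7.0});
  at::Tensor plain = at::div(t, 2.0);            // 3.5, -3.5
  at::Tensor trunc = at::div(t, 2.0, "trunc");   // rounds toward zero: 3, -3
  at::Tensor floor = at::div(t, 2.0, "floor");   // rounds toward -inf: 3, -4
  return 0;
}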
+ inline at::Tensor & embedding_dense_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out) { + return at::_ops::embedding_dense_backward_out::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); + } + + // aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & embedding_dense_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward_out::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); + } + + // aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & embedding_dense_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) { + return at::_ops::embedding_dense_backward_out::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); + } + + // aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & embedding_renorm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { + return at::_ops::embedding_renorm_out::redispatch(dispatchKeySet, self, indices, max_norm, norm_type, out); + } + + // aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & embedding_renorm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) { + return at::_ops::embedding_renorm_out::redispatch(dispatchKeySet, self, indices, max_norm, norm_type, out); + } + + // aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor + inline at::Tensor embedding_renorm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { + return at::_ops::embedding_renorm::redispatch(dispatchKeySet, self, indices, max_norm, norm_type); + } + + // aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple _embedding_bag_forward_only_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_forward_only_out::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3); + } + + // aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple _embedding_bag_forward_only_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { + return at::_ops::_embedding_bag_forward_only_out::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3); + } + + // aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple _embedding_bag_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_out::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3); + } + + // aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple _embedding_bag_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { + return at::_ops::_embedding_bag_out::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3); + } + + // aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _embedding_bag_dense_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_dense_backward_out::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out); + } + + // aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _embedding_bag_dense_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx, at::Tensor & out) { + return at::_ops::_embedding_bag_dense_backward_out::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out); + } + + // aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _embedding_bag_dense_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_dense_backward_out::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out); + } + + // aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & _embedding_bag_dense_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx, at::Tensor & out) { + return at::_ops::_embedding_bag_dense_backward_out::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out); + } + + // aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _embedding_bag_per_sample_weights_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_per_sample_weights_backward_out::redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx, out); + } + + // aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _embedding_bag_per_sample_weights_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) { + return at::_ops::_embedding_bag_per_sample_weights_backward_out::redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx, out); + } + + // aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional names, c10::optional memory_format=c10::nullopt) { + return at::_ops::empty_names_out::redispatch(dispatchKeySet, size, names, memory_format, out); + } + + // aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, c10::optional memory_format, at::Tensor & out) { + return at::_ops::empty_names_out::redispatch(dispatchKeySet, size, names, memory_format, out); + } + + // aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_permuted_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, at::IntArrayRef physical_layout) { + return at::_ops::empty_permuted_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), physical_layout, out); + } + + // aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) 
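The _embedding_bag family above gathers embedding rows and pools them per bag, returning the pooled output plus the bookkeeping tensors (offset2bag, bag_size, max_indices) that the backward ops reuse. A rough sketch via the public at::embedding_bag convenience overload (signature defaults assumed; values illustrative):

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor weight  = at::randn({10, 4});                         // 10 embeddings of dim 4
  at::Tensor indices = at::tensor({1, 2, 4, 5, 4, 3}, at::kLong);
  at::Tensor offsets = at::tensor({0, 3}, at::kLong);              // two bags: [1,2,4] and [5,4,3]
  auto outs = at::embedding_bag(weight, indices, offsets,
                                /*scale_grad_by_freq=*/false, /*mode=*/1);  // mode 1 = mean pooling
  at::Tensor pooled = std::get<0>(outs);                           // shape {2, 4}
  return 0;
}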
+ inline at::Tensor & empty_permuted_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) { + return at::_ops::empty_permuted_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), physical_layout, out); + } + + // aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_permuted_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, at::IntArrayRef physical_layout) { + return at::_ops::empty_permuted_out::redispatch(dispatchKeySet, size, physical_layout, out); + } + + // aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_permuted_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) { + return at::_ops::empty_permuted_out::redispatch(dispatchKeySet, size, physical_layout, out); + } + + // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_empty_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::new_empty_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_empty_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::new_empty_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_empty_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::new_empty_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_empty_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::new_empty_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_empty_strided_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) { + return at::_ops::new_empty_strided_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_empty_strided_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::new_empty_strided_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) 
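Throughout this header the plain overloads convert concrete int64_t sizes to symbolic ones with c10::fromIntArrayRefSlow before redispatching, while the _symint overloads take c10::SymIntArrayRef directly. A minimal sketch of that relationship (new_empty_symint is assumed to be the generated symbolic counterpart of the new_empty method):

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<int64_t> sizes = {2, 3, 4};
  // Range-checks each value and reinterprets the buffer as SymInts, as the wrappers above do.
  c10::SymIntArrayRef sym_sizes = c10::fromIntArrayRefSlow(sizes);
  at::Tensor base = at::zeros({1});
  at::Tensor a = base.new_empty(sizes);               // concrete-int path
  at::Tensor b = base.new_empty_symint(sym_sizes);    // symbolic path; same {2, 3, 4} result here
  return 0;
}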
+ inline at::Tensor & new_empty_strided_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + return at::_ops::new_empty_strided_out::redispatch(dispatchKeySet, self, size, stride, out); + } + + // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_empty_strided_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { + return at::_ops::new_empty_strided_out::redispatch(dispatchKeySet, self, size, stride, out); + } + + // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_full_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value) { + return at::_ops::new_full_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), fill_value, out); + } + + // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_full_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { + return at::_ops::new_full_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), fill_value, out); + } + + // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_full_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value) { + return at::_ops::new_full_out::redispatch(dispatchKeySet, self, size, fill_value, out); + } + + // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_full_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { + return at::_ops::new_full_out::redispatch(dispatchKeySet, self, size, fill_value, out); + } + + // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_zeros_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::new_zeros_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_zeros_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::new_zeros_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_zeros_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::new_zeros_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
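The new_empty/new_full/new_zeros/new_ones ops above back the Tensor::new_* methods, which inherit dtype and device from the source tensor unless overridden. A short sketch (illustrative values):

#include <ATen/ATen.h>

int main() {
  at::Tensor base = at::randn({4}, at::kDouble);
  at::Tensor z = base.new_zeros({2, 3});        // double zeros, same device as base
  at::Tensor f = base.new_full({2, 2}, 1.5);    // double tensor filled with 1.5
  at::Tensor e = base.new_empty({8});           // double tensor, contents uninitialized
  return 0;
}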
+ inline at::Tensor & new_zeros_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::new_zeros_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_ones_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::new_ones_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_ones_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::new_ones_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_ones_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::new_ones_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & new_ones_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::new_ones_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _empty_affine_quantized_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_affine_quantized_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out); + } + + // aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _empty_affine_quantized_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, double scale, int64_t zero_point, c10::optional memory_format, at::Tensor & out) { + return at::_ops::_empty_affine_quantized_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out); + } + + // aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _empty_affine_quantized_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_affine_quantized_out::redispatch(dispatchKeySet, size, scale, zero_point, memory_format, out); + } + + // aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _empty_affine_quantized_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, double scale, int64_t zero_point, c10::optional memory_format, at::Tensor & out) { + return at::_ops::_empty_affine_quantized_out::redispatch(dispatchKeySet, size, scale, zero_point, memory_format, out); + } + + // aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _empty_per_channel_affine_quantized_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_per_channel_affine_quantized_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out); + } + + // aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _empty_per_channel_affine_quantized_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional memory_format, at::Tensor & out) { + return at::_ops::_empty_per_channel_affine_quantized_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out); + } + + // aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _empty_per_channel_affine_quantized_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_per_channel_affine_quantized_out::redispatch(dispatchKeySet, size, scales, zero_points, axis, memory_format, out); + } + + // aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _empty_per_channel_affine_quantized_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional memory_format, at::Tensor & out) { + return at::_ops::_empty_per_channel_affine_quantized_out::redispatch(dispatchKeySet, size, scales, zero_points, axis, memory_format, out); + } + + // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), memory_format, out); + } + + // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
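The resize.out/resize ops above implement in-place style resizing: storage is grown as needed and any newly exposed elements are left uninitialized. A minimal sketch with the Tensor::resize_ method (illustrative only):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::empty({0});
  t.resize_({2, 3});   // reallocates storage as needed; contents are uninitialized
  t.zero_();           // so initialize explicitly before reading
  return 0;
}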
+ inline const at::Tensor & resize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format, const at::Tensor & out) { + return at::_ops::resize_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), memory_format, out); + } + + // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_symint_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_out::redispatch(dispatchKeySet, self, size, memory_format, out); + } + + // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format, const at::Tensor & out) { + return at::_ops::resize_out::redispatch(dispatchKeySet, self, size, memory_format, out); + } + + // aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor resize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), memory_format); + } + + // aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor resize_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize::redispatch(dispatchKeySet, self, size, memory_format); + } + + // aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & _resize_output_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::Device device) { + return at::_ops::_resize_output_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), device, out); + } + + // aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & _resize_output_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) { + return at::_ops::_resize_output_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), device, out); + } + + // aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & _resize_output_symint_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) { + return at::_ops::_resize_output_out::redispatch(dispatchKeySet, self, size, device, out); + } + + // aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) 
+ inline const at::Tensor & _resize_output_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device, const at::Tensor & out) { + return at::_ops::_resize_output_out::redispatch(dispatchKeySet, self, size, device, out); + } + + // aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor + inline at::Tensor _resize_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device) { + return at::_ops::_resize_output::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), device); + } + + // aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor + inline at::Tensor _resize_output_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) { + return at::_ops::_resize_output::redispatch(dispatchKeySet, self, size, device); + } + + // aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_quantized_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional memory_format=c10::nullopt) { + return at::_ops::empty_quantized_out::redispatch(dispatchKeySet, size, qtensor, memory_format, out); + } + + // aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_quantized_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional memory_format, at::Tensor & out) { + return at::_ops::empty_quantized_out::redispatch(dispatchKeySet, size, qtensor, memory_format, out); + } + + // aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::empty_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format, at::Tensor & out) { + return at::_ops::empty_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_strided_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, at::IntArrayRef stride) { + return at::_ops::empty_strided_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_strided_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::empty_strided_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) 
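empty_strided pairs an explicit size with an explicit stride, so it can produce non-contiguous layouts directly. A small column-major sketch (values illustrative):

#include <ATen/ATen.h>

int main() {
  // Element (i, j) of this 2x3 tensor lives at offset i*1 + j*2: a column-major layout.
  at::Tensor t = at::empty_strided({2, 3}, {1, 2});
  bool contig = t.is_contiguous();   // false for these strides
  at::Tensor c = t.contiguous();     // copies into the default row-major layout
  return 0;
}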
+ inline at::Tensor & empty_strided_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + return at::_ops::empty_strided_out::redispatch(dispatchKeySet, size, stride, out); + } + + // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_strided_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { + return at::_ops::empty_strided_out::redispatch(dispatchKeySet, size, stride, out); + } + + // aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & value) { + return at::_ops::fill_Scalar_out::redispatch(dispatchKeySet, self, value, out); + } + + // aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value, at::Tensor & out) { + return at::_ops::fill_Scalar_out::redispatch(dispatchKeySet, self, value, out); + } + + // aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & value) { + return at::_ops::fill_Tensor_out::redispatch(dispatchKeySet, self, value, out); + } + + // aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value, at::Tensor & out) { + return at::_ops::fill_Tensor_out::redispatch(dispatchKeySet, self, value, out); + } + + // aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & floor_divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::floor_divide_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & floor_divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::floor_divide_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & full_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional names) { + return at::_ops::full_names_out::redispatch(dispatchKeySet, size, fill_value, names, out); + } + + // aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & full_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional names, at::Tensor & out) { + return at::_ops::full_names_out::redispatch(dispatchKeySet, size, fill_value, names, out); + } + + // aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
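fill and floor_divide above, together with the full/full_like factories around them, are the out-variant forms of familiar eager calls. A short sketch of the usual entry points (results in comments hold for these sample values):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::zeros({2, 2});
  t.fill_(3.5);                           // every element becomes 3.5
  at::Tensor u = at::full_like(t, 7);     // same shape/dtype as t, filled with 7
  at::Tensor q = at::floor_divide(u, t);  // floor(7 / 3.5) -> all 2s
  return 0;
}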
+ inline at::Tensor & full_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, c10::optional memory_format=c10::nullopt) { + return at::_ops::full_like_out::redispatch(dispatchKeySet, self, fill_value, memory_format, out); + } + + // aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & full_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, c10::optional memory_format, at::Tensor & out) { + return at::_ops::full_like_out::redispatch(dispatchKeySet, self, fill_value, memory_format, out); + } + + // aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & from_file_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::string_view filename, c10::optional shared=c10::nullopt, c10::optional size=0) { + return at::_ops::from_file_out::redispatch(dispatchKeySet, filename, shared, size, out); + } + + // aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & from_file_outf(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional shared, c10::optional size, at::Tensor & out) { + return at::_ops::from_file_out::redispatch(dispatchKeySet, filename, shared, size, out); + } + + // aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & grid_sampler_2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_2d_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out); + } + + // aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & grid_sampler_2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { + return at::_ops::grid_sampler_2d_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out); + } + + // aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple grid_sampler_2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + return at::_ops::grid_sampler_2d_backward_out::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); + } + + // aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple grid_sampler_2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::grid_sampler_2d_backward_out::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); + } + + // aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _grid_sampler_2d_cpu_fallback_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::_grid_sampler_2d_cpu_fallback_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out); + } + + // aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _grid_sampler_2d_cpu_fallback_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { + return at::_ops::_grid_sampler_2d_cpu_fallback_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out); + } + + // aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & grid_sampler_3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_3d_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out); + } + + // aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & grid_sampler_3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { + return at::_ops::grid_sampler_3d_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out); + } + + // aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple grid_sampler_3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + return at::_ops::grid_sampler_3d_backward_out::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); + } + + // aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple grid_sampler_3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::grid_sampler_3d_backward_out::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); + } + + // aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hann_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) { + return at::_ops::hann_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hann_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { + return at::_ops::hann_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hann_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::hann_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hann_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::hann_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hamming_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) { + return at::_ops::hamming_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hamming_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { + return at::_ops::hamming_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hamming_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::hamming_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & hamming_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::hamming_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hamming_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic, double alpha) { + return at::_ops::hamming_window_periodic_alpha_out::redispatch(dispatchKeySet, window_length, periodic, alpha, out); + } + + // aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hamming_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::Tensor & out) { + return at::_ops::hamming_window_periodic_alpha_out::redispatch(dispatchKeySet, window_length, periodic, alpha, out); + } + + // aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hamming_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta) { + return at::_ops::hamming_window_periodic_alpha_beta_out::redispatch(dispatchKeySet, window_length, periodic, alpha, beta, out); + } + + // aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hamming_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) { + return at::_ops::hamming_window_periodic_alpha_beta_out::redispatch(dispatchKeySet, window_length, periodic, alpha, beta, out); + } + + // aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & kaiser_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) { + return at::_ops::kaiser_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & kaiser_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { + return at::_ops::kaiser_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & kaiser_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::kaiser_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & kaiser_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::kaiser_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) 
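The hann/hamming/kaiser window factories in this stretch differ only in their shape parameters (periodic flag, alpha/beta coefficients). A minimal sketch of the functional forms (window length and beta are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor hann    = at::hann_window(256);
  at::Tensor hamming = at::hamming_window(256, /*periodic=*/true);
  at::Tensor kaiser  = at::kaiser_window(256, /*periodic=*/true, /*beta=*/12.0);
  return 0;
}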
+ inline at::Tensor & kaiser_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic, double beta) { + return at::_ops::kaiser_window_beta_out::redispatch(dispatchKeySet, window_length, periodic, beta, out); + } + + // aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & kaiser_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out) { + return at::_ops::kaiser_window_beta_out::redispatch(dispatchKeySet, window_length, periodic, beta, out); + } + + // aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_group_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) { + return at::_ops::native_group_norm_out::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2); + } + + // aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_group_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_group_norm_out::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2); + } + + // aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_group_norm_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) { + return at::_ops::native_group_norm_out::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2); + } + + // aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_group_norm_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_group_norm_out::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2); + } + + // aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_group_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array output_mask) { + return at::_ops::native_group_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2); + } + + // aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_group_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_group_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2); + } + + // aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_group_norm_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask) { + return at::_ops::native_group_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2); + } + + // aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_group_norm_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_group_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2); + } + + // aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) 
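+ // index_put receives its indices as a list of optional tensors (`Tensor?[]` in
+ // the schema, `c10::List<c10::optional<at::Tensor>>` in C++); an empty optional
+ // in that list means "index the whole corresponding dimension".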
+ inline at::Tensor & index_put_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
+ return at::_ops::index_put_out::redispatch(dispatchKeySet, self, indices, values, accumulate, out);
+ }
+
+ // aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & index_put_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
+ return at::_ops::index_put_out::redispatch(dispatchKeySet, self, indices, values, accumulate, out);
+ }
+
+ // aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _index_put_impl_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
+ return at::_ops::_index_put_impl_out::redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe, out);
+ }
+
+ // aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _index_put_impl_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
+ return at::_ops::_index_put_impl_out::redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe, out);
+ }
+
+ // aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
+ inline at::Tensor _index_put_impl(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
+ return at::_ops::_index_put_impl::redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe);
+ }
+
+ // aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & isnan_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::isnan_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & isnan_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::isnan_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
+ return at::_ops::native_layer_norm_out::redispatch(dispatchKeySet, input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, out0, out1, out2);
+ }
+
+ // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_layer_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_layer_norm_out::redispatch(dispatchKeySet, input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, out0, out1, out2); + } + + // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_layer_norm_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps) { + return at::_ops::native_layer_norm_out::redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, out0, out1, out2); + } + + // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_layer_norm_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_layer_norm_out::redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, out0, out1, out2); + } + + // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_layer_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2); + } + + // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_layer_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_layer_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2); + } + + // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_layer_norm_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2); + } + + // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_layer_norm_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_layer_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2); + } + + // aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple linear_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask) { + return at::_ops::linear_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2); + } + + // aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple linear_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::linear_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2); + } + + // aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) 
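+ // The native_group_norm / native_layer_norm wrappers above illustrate the
+ // SymInt pattern used throughout this file: the `_symint_out` / `_symint_outf`
+ // flavours pass c10::SymInt and c10::SymIntArrayRef through untouched, while
+ // the plain overloads accept concrete int64_t / at::IntArrayRef values and
+ // convert the arrays with c10::fromIntArrayRefSlow before redispatching to the
+ // same operator.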
+ inline at::Tensor & mkldnn_linear_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias={}) { + return at::_ops::mkldnn_linear_out::redispatch(dispatchKeySet, self, weight, bias, out); + } + + // aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_linear_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::Tensor & out) { + return at::_ops::mkldnn_linear_out::redispatch(dispatchKeySet, self, weight, bias, out); + } + + // aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_linear_backward_input_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) { + return at::_ops::mkldnn_linear_backward_input_out::redispatch(dispatchKeySet, input_size, grad_output, weight, out); + } + + // aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_linear_backward_input_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) { + return at::_ops::mkldnn_linear_backward_input_out::redispatch(dispatchKeySet, input_size, grad_output, weight, out); + } + + // aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple mkldnn_linear_backward_weights_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) { + return at::_ops::mkldnn_linear_backward_weights_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_defined, out0, out1); + } + + // aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple mkldnn_linear_backward_weights_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mkldnn_linear_backward_weights_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_defined, out0, out1); + } + + // aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple mkldnn_linear_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask) { + return at::_ops::mkldnn_linear_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2); + } + + // aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple mkldnn_linear_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::mkldnn_linear_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2); + } + + // aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple matmul_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array mask) { + return at::_ops::matmul_backward_out::redispatch(dispatchKeySet, grad, self, other, mask, out0, out1); + } + + // aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple matmul_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::matmul_backward_out::redispatch(dispatchKeySet, grad, self, other, mask, out0, out1); + } + + // aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _aminmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self) { + return at::_ops::_aminmax_out::redispatch(dispatchKeySet, self, out0, out1); + } + + // aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _aminmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_aminmax_out::redispatch(dispatchKeySet, self, out0, out1); + } + + // aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _aminmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::_aminmax_dim_out::redispatch(dispatchKeySet, self, dim, keepdim, out0, out1); + } + + // aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _aminmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_aminmax_dim_out::redispatch(dispatchKeySet, self, dim, keepdim, out0, out1); + } + + // aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
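+ // The output_mask parameters above are fixed-size std::array<bool,N> values
+ // mirroring the bool[N] schema arguments; they select which gradients a
+ // backward kernel actually materializes. The pooling wrappers below show the
+ // other recurring pattern: `_out` overloads keep the schema defaults
+ // (stride=[], padding=0, dilation=1, ceil_mode=False), whereas `_outf`
+ // overloads require every argument explicitly.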
+ inline at::Tensor & max_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::max_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::mkldnn_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::mkldnn_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_max_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::mkldnn_max_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & mkldnn_max_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::mkldnn_max_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_max_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::mkldnn_max_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_max_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::mkldnn_max_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_max_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::mkldnn_max_pool3d_backward_out::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_max_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::mkldnn_max_pool3d_backward_out::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & quantized_max_pool1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool1d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_max_pool1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::quantized_max_pool1d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::quantized_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_max_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_max_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::quantized_max_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out); + } + + // aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
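+ // A minimal usage sketch (assuming the enclosing namespace is at::redispatch,
+ // which this hunk does not show): a kernel that has already handled its own
+ // dispatch key can forward the remaining keys explicitly, e.g.
+ //   at::Tensor out = at::empty({0}, self.options());
+ //   at::redispatch::median_out(ks, out, self);
+ // where `ks` is the DispatchKeySet left after removing the keys handled so far.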
+ inline at::Tensor & median_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::median_out::redispatch(dispatchKeySet, self, out); + } + + // aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & median_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::median_out::redispatch(dispatchKeySet, self, out); + } + + // aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanmedian_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::nanmedian_out::redispatch(dispatchKeySet, self, out); + } + + // aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanmedian_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::nanmedian_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mps_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::_mps_convolution_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mps_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { + return at::_ops::_mps_convolution_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mps_convolution_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::_mps_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out); + } + + // aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _mps_convolution_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) { + return at::_ops::_mps_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out); + } + + // aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple mps_convolution_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1, out2); + } + + // aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple mps_convolution_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::mps_convolution_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1, out2); + } + + // aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple mps_convolution_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2); + } + + // aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple mps_convolution_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::mps_convolution_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2); + } + + // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::mkldnn_convolution_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { + return at::_ops::mkldnn_convolution_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_convolution_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::mkldnn_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out); + } + + // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_convolution_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) { + return at::_ops::mkldnn_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out); + } + + // aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple mkldnn_rnn_layer_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) { + return at::_ops::mkldnn_rnn_layer_out::redispatch(dispatchKeySet, input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3); + } + + // aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple mkldnn_rnn_layer_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { + return at::_ops::mkldnn_rnn_layer_out::redispatch(dispatchKeySet, input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3); + } + + // aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) 
out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) + inline ::std::tuple mkldnn_rnn_layer_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) { + return at::_ops::mkldnn_rnn_layer_backward_out::redispatch(dispatchKeySet, input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6); + } + + // aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) + inline ::std::tuple mkldnn_rnn_layer_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) { + return at::_ops::mkldnn_rnn_layer_backward_out::redispatch(dispatchKeySet, input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6); + } + + // aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple miopen_batch_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon) { + return at::_ops::miopen_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2); + } + + // aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple miopen_batch_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::miopen_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2); + } + + // aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple miopen_batch_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon) { + return at::_ops::miopen_batch_norm_backward_out::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2); + } + + // aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple miopen_batch_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::miopen_batch_norm_backward_out::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2); + } + + // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
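+ // Note the schema-to-C++ scalar mapping used throughout: schema `float`
+ // arguments (eps, epsilon, exponential_average_factor) arrive as double,
+ // schema `int` arguments as int64_t, and schema `SymInt` as c10::SymInt.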
+ inline at::Tensor & miopen_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } + + // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_convolution_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } + + // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_convolution_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } + + // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_convolution_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } + + // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & miopen_convolution_transpose_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } + + // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_convolution_transpose_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } + + // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_convolution_transpose_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out); + } + + // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_convolution_transpose_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out); + } + + // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
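+ // The miopen convolution wrappers (miopen_convolution above,
+ // miopen_convolution_transpose with its extra output_padding, and
+ // miopen_depthwise_convolution below) all carry the same benchmark /
+ // deterministic flags and the same four-wrapper layout: _out, _outf,
+ // _symint_out, _symint_outf.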
+ inline at::Tensor & miopen_depthwise_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } + + // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_depthwise_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::redispatch(dispatchKeySet, self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } + + // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_depthwise_convolution_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } + + // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & miopen_depthwise_convolution_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } + + // aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) 
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
+ return at::_ops::miopen_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
+ }
+
+ // aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
+ return at::_ops::miopen_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
+ }
+
+ // aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+ inline void miopen_rnn_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+ return at::_ops::miopen_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+ }
+
+ // aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+ inline void miopen_rnn_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
+ return at::_ops::miopen_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+ }
+
+ // aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _sparse_sparse_matmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+ return at::_ops::_sparse_sparse_matmul_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _sparse_sparse_matmul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+ return at::_ops::_sparse_sparse_matmul_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+ return at::_ops::mul_Scalar_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+ return at::_ops::mul_Scalar_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
+ return at::_ops::_native_batch_norm_legit_functional::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps);
+ }
+
+ // aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _native_batch_norm_legit_no_training_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) { + return at::_ops::_native_batch_norm_legit_no_training_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2); + } + + // aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _native_batch_norm_legit_no_training_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_native_batch_norm_legit_no_training_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2); + } + + // aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple batch_norm_stats_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double eps) { + return at::_ops::batch_norm_stats_out::redispatch(dispatchKeySet, input, eps, out0, out1); + } + + // aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple batch_norm_stats_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::batch_norm_stats_out::redispatch(dispatchKeySet, input, eps, out0, out1); + } + + // aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple batch_norm_gather_stats_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count) { + return at::_ops::batch_norm_gather_stats_out::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1); + } + + // aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple batch_norm_gather_stats_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::batch_norm_gather_stats_out::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1); + } + + // aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple batch_norm_gather_stats_with_counts_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const at::Tensor & counts) { + return at::_ops::batch_norm_gather_stats_with_counts_out::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1); + } + + // aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple batch_norm_gather_stats_with_counts_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::batch_norm_gather_stats_with_counts_out::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1); + } + + // aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_batch_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask) { + return at::_ops::native_batch_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2); + } + + // aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple native_batch_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_batch_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2); + } + + // aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple batch_norm_backward_reduce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g) { + return at::_ops::batch_norm_backward_reduce_out::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3); + } + + // aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple batch_norm_backward_reduce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { + return at::_ops::batch_norm_backward_reduce_out::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3); + } + + // aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & batch_norm_backward_elemt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) { + return at::_ops::batch_norm_backward_elemt_out::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count, out); + } + + // aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & batch_norm_backward_elemt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count, at::Tensor & out) { + return at::_ops::batch_norm_backward_elemt_out::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count, out); + } + + // aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple batch_norm_update_stats_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum) { + return at::_ops::batch_norm_update_stats_out::redispatch(dispatchKeySet, input, running_mean, running_var, momentum, out0, out1); + } + + // aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple batch_norm_update_stats_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::batch_norm_update_stats_out::redispatch(dispatchKeySet, input, running_mean, running_var, momentum, out0, out1); + } + + // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nnpack_spatial_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) { + return at::_ops::_nnpack_spatial_convolution_out::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nnpack_spatial_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::_nnpack_spatial_convolution_out::redispatch(dispatchKeySet, input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nnpack_spatial_convolution_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) { + return at::_ops::_nnpack_spatial_convolution_out::redispatch(dispatchKeySet, input, weight, bias, padding, stride, out); + } + + // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _nnpack_spatial_convolution_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) { + return at::_ops::_nnpack_spatial_convolution_out::redispatch(dispatchKeySet, input, weight, bias, padding, stride, out); + } + + // aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ones_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional names) { + return at::_ops::ones_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ones_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::ones_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ones_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::ones_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ones_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format, at::Tensor & out) { + return at::_ops::ones_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _euclidean_dist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2) { + return at::_ops::_euclidean_dist_out::redispatch(dispatchKeySet, x1, x2, out); + } + + // aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _euclidean_dist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) { + return at::_ops::_euclidean_dist_out::redispatch(dispatchKeySet, x1, x2, out); + } + + // aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cdist_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional compute_mode) { + return at::_ops::_cdist_forward_out::redispatch(dispatchKeySet, x1, x2, p, compute_mode, out); + } + + // aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cdist_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional compute_mode, at::Tensor & out) { + return at::_ops::_cdist_forward_out::redispatch(dispatchKeySet, x1, x2, p, compute_mode, out); + } + + // aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _cdist_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) { + return at::_ops::_cdist_backward_out::redispatch(dispatchKeySet, grad, x1, x2, p, cdist, out); + } + + // aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cdist_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) { + return at::_ops::_cdist_backward_out::redispatch(dispatchKeySet, grad, x1, x2, p, cdist, out); + } + + // aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _pdist_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double p=2) { + return at::_ops::_pdist_forward_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _pdist_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, at::Tensor & out) { + return at::_ops::_pdist_forward_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _pdist_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) { + return at::_ops::_pdist_backward_out::redispatch(dispatchKeySet, grad, self, p, pdist, out); + } + + // aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _pdist_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) { + return at::_ops::_pdist_backward_out::redispatch(dispatchKeySet, grad, self, p, pdist, out); + } + + // aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pixel_shuffle_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t upscale_factor) { + return at::_ops::pixel_shuffle_out::redispatch(dispatchKeySet, self, upscale_factor, out); + } + + // aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pixel_shuffle_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) { + return at::_ops::pixel_shuffle_out::redispatch(dispatchKeySet, self, upscale_factor, out); + } + + // aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pixel_unshuffle_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t downscale_factor) { + return at::_ops::pixel_unshuffle_out::redispatch(dispatchKeySet, self, downscale_factor, out); + } + + // aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & pixel_unshuffle_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) { + return at::_ops::pixel_unshuffle_out::redispatch(dispatchKeySet, self, downscale_factor, out); + } + + // aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & channel_shuffle_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t groups) { + return at::_ops::channel_shuffle_out::redispatch(dispatchKeySet, self, groups, out); + } + + // aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & channel_shuffle_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups, at::Tensor & out) { + return at::_ops::channel_shuffle_out::redispatch(dispatchKeySet, self, groups, out); + } + + // aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & channel_shuffle_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymInt groups) { + return at::_ops::channel_shuffle_out::redispatch(dispatchKeySet, self, groups, out); + } + + // aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & channel_shuffle_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups, at::Tensor & out) { + return at::_ops::channel_shuffle_out::redispatch(dispatchKeySet, self, groups, out); + } + + // aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _pin_memory_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional device=c10::nullopt) { + return at::_ops::_pin_memory_out::redispatch(dispatchKeySet, self, device, out); + } + + // aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _pin_memory_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional device, at::Tensor & out) { + return at::_ops::_pin_memory_out::redispatch(dispatchKeySet, self, device, out); + } + + // aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scalar_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & s) { + return at::_ops::scalar_tensor_out::redispatch(dispatchKeySet, s, out); + } + + // aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scalar_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::Tensor & out) { + return at::_ops::scalar_tensor_out::redispatch(dispatchKeySet, s, out); + } + + // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional names) { + return at::_ops::rand_names_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), names, out); + } + + // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::rand_names_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), names, out); + } + + // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & rand_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::optional names) { + return at::_ops::rand_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::rand_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::rand_generator_with_names_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, names, out); + } + + // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, names, out); + } + + // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::rand_generator_with_names_out::redispatch(dispatchKeySet, size, generator, names, out); + } + + // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::redispatch(dispatchKeySet, size, generator, names, out); + } + + // aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::rand_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rand_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format, at::Tensor & out) { + return at::_ops::rand_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t high, c10::optional memory_format=c10::nullopt) { + return at::_ops::randint_like_out::redispatch(dispatchKeySet, self, high, memory_format, out); + } + + // aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & randint_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, c10::optional memory_format, at::Tensor & out) { + return at::_ops::randint_like_out::redispatch(dispatchKeySet, self, high, memory_format, out); + } + + // aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_like_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymInt high, c10::optional memory_format=c10::nullopt) { + return at::_ops::randint_like_out::redispatch(dispatchKeySet, self, high, memory_format, out); + } + + // aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_like_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, c10::optional memory_format, at::Tensor & out) { + return at::_ops::randint_like_out::redispatch(dispatchKeySet, self, high, memory_format, out); + } + + // aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, c10::optional memory_format=c10::nullopt) { + return at::_ops::randint_like_low_dtype_out::redispatch(dispatchKeySet, self, low, high, memory_format, out); + } + + // aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, c10::optional memory_format, at::Tensor & out) { + return at::_ops::randint_like_low_dtype_out::redispatch(dispatchKeySet, self, low, high, memory_format, out); + } + + // aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_like_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional memory_format=c10::nullopt) { + return at::_ops::randint_like_low_dtype_out::redispatch(dispatchKeySet, self, low, high, memory_format, out); + } + + // aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randint_like_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional memory_format, at::Tensor & out) { + return at::_ops::randint_like_low_dtype_out::redispatch(dispatchKeySet, self, low, high, memory_format, out); + } + + // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional names) { + return at::_ops::randn_names_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), names, out); + } + + // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & randn_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), names, out); + } + + // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::optional names) { + return at::_ops::randn_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::randn_generator_with_names_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, names, out); + } + + // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::randn_generator_with_names_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), generator, names, out); + } + + // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::randn_generator_with_names_out::redispatch(dispatchKeySet, size, generator, names, out); + } + + // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::randn_generator_with_names_out::redispatch(dispatchKeySet, size, generator, names, out); + } + + // aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::randn_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & randn_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format, at::Tensor & out) { + return at::_ops::randn_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & repeat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef repeats) { + return at::_ops::repeat_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(repeats), out); + } + + // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & repeat_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef repeats, at::Tensor & out) { + return at::_ops::repeat_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(repeats), out); + } + + // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & repeat_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef repeats) { + return at::_ops::repeat_out::redispatch(dispatchKeySet, self, repeats, out); + } + + // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & repeat_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) { + return at::_ops::repeat_out::redispatch(dispatchKeySet, self, repeats, out); + } + + // aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & repeat_interleave_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor_out::redispatch(dispatchKeySet, repeats, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt, out); + } + + // aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & repeat_interleave_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional output_size, at::Tensor & out) { + return at::_ops::repeat_interleave_Tensor_out::redispatch(dispatchKeySet, repeats, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt, out); + } + + // aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & repeat_interleave_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor_out::redispatch(dispatchKeySet, repeats, output_size, out); + } + + // aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & repeat_interleave_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional output_size, at::Tensor & out) { + return at::_ops::repeat_interleave_Tensor_out::redispatch(dispatchKeySet, repeats, output_size, out); + } + + // aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mkldnn_reshape_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef shape) { + return at::_ops::_mkldnn_reshape_out::redispatch(dispatchKeySet, self, shape, out); + } + + // aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _mkldnn_reshape_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) { + return at::_ops::_mkldnn_reshape_out::redispatch(dispatchKeySet, self, shape, out); + } + + // aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::relu_out::redispatch(dispatchKeySet, self, out); + } + + // aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::relu_out::redispatch(dispatchKeySet, self, out); + } + + // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & select_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) { + return at::_ops::select_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out); + } + + // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & select_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out) { + return at::_ops::select_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out); + } + + // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & select_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) { + return at::_ops::select_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, dim, index, out); + } + + // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & select_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) { + return at::_ops::select_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, dim, index, out); + } + + // aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & celu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha=1.0) { + return at::_ops::celu_out::redispatch(dispatchKeySet, self, alpha, out); + } + + // aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & celu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::celu_out::redispatch(dispatchKeySet, self, alpha, out); + } + + // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & slice_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
+ return at::_ops::slice_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step, out);
+ }
+
+ // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step, at::Tensor & out) {
+ return at::_ops::slice_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step, out);
+ }
+
+ // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
+ return at::_ops::slice_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step, out);
+ }
+
+ // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
+ return at::_ops::slice_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step, out);
+ }
+
+ // aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+ return at::_ops::slice_scatter_out::redispatch(dispatchKeySet, self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+ }
+
+ // aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
+ return at::_ops::slice_scatter_out::redispatch(dispatchKeySet, self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+ }
+
+ // aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
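+ // NOTE (illustrative, not part of the generated header): the int64_t overloads of slice_scatter
+ // above take c10::optional<int64_t> and wrap any present value into a c10::optional<c10::SymInt>
+ // via c10::make_optional before redispatching, whereas the *_symint_* variants below forward
+ // their optional SymInt arguments unchanged. Assuming the wrappers live in at::redispatch and
+ // that `dispatchKeySet`, `self`, `src`, and `out` already exist, a hypothetical caller might write:
+ //
+ //   // scatter `src` into positions 2..4 along dim 0 of `self`, writing the result into `out`
+ //   at::redispatch::slice_scatter_outf(
+ //       dispatchKeySet, self, src, /*dim=*/0,
+ //       /*start=*/c10::optional<int64_t>(2), /*end=*/c10::optional<int64_t>(4),
+ //       /*step=*/1, out);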
+ inline at::Tensor & slice_scatter_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+ return at::_ops::slice_scatter_out::redispatch(dispatchKeySet, self, src, dim, start, end, step, out);
+ }
+
+ // aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_scatter_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
+ return at::_ops::slice_scatter_out::redispatch(dispatchKeySet, self, src, dim, start, end, step, out);
+ }
+
+ // aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
+ return at::_ops::select_scatter_out::redispatch(dispatchKeySet, self, src, dim, index, out);
+ }
+
+ // aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index, at::Tensor & out) {
+ return at::_ops::select_scatter_out::redispatch(dispatchKeySet, self, src, dim, index, out);
+ }
+
+ // aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_scatter_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
+ return at::_ops::select_scatter_out::redispatch(dispatchKeySet, self, src, dim, index, out);
+ }
+
+ // aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_scatter_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
+ return at::_ops::select_scatter_out::redispatch(dispatchKeySet, self, src, dim, index, out);
+ }
+
+ // aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & diagonal_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
+ return at::_ops::diagonal_scatter_out::redispatch(dispatchKeySet, self, src, offset, dim1, dim2, out);
+ }
+
+ // aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & diagonal_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+ return at::_ops::diagonal_scatter_out::redispatch(dispatchKeySet, self, src, offset, dim1, dim2, out);
+ }
+
+ // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & as_strided_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_scatter_out::redispatch(dispatchKeySet, self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out); + } + + // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & as_strided_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset, at::Tensor & out) { + return at::_ops::as_strided_scatter_out::redispatch(dispatchKeySet, self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out); + } + + // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & as_strided_scatter_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_scatter_out::redispatch(dispatchKeySet, self, src, size, stride, storage_offset, out); + } + + // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & as_strided_scatter_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, at::Tensor & out) { + return at::_ops::as_strided_scatter_out::redispatch(dispatchKeySet, self, src, size, stride, storage_offset, out); + } + + // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + inline void unsafe_split_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) { + return at::_ops::unsafe_split_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out); + } + + // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + inline void unsafe_split_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) { + return at::_ops::unsafe_split_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out); + } + + // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + inline void unsafe_split_symint_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) { + return at::_ops::unsafe_split_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out); + } + + // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () + inline void unsafe_split_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { + return at::_ops::unsafe_split_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out); + } + + // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + inline void unsafe_split_with_sizes_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::unsafe_split_with_sizes_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(split_sizes), dim, out); + } + + // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + inline void unsafe_split_with_sizes_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::unsafe_split_with_sizes_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(split_sizes), dim, out); + } + + // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + inline void unsafe_split_with_sizes_symint_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::unsafe_split_with_sizes_out::redispatch(dispatchKeySet, self, split_sizes, dim, out); + } + + // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () + inline void unsafe_split_with_sizes_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::unsafe_split_with_sizes_out::redispatch(dispatchKeySet, self, split_sizes, dim, out); + } + + // aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
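+
+  // Illustrative sketch (hypothetical helper, not generated): the plain and `_symint`
+  // overloads above target the same underlying op; the plain form takes concrete
+  // int64_t / IntArrayRef arguments while the `_symint` form accepts symbolic
+  // c10::SymInt values. Ops that write into a list of outputs, such as
+  // unsafe_split.Tensor_out, take an at::TensorList and return void.
+  inline void example_unsafe_split_into(c10::DispatchKeySet ks, at::TensorList outs, const at::Tensor & self) {
+    // Split along dim 0 into chunks of (symbolic) size 2; equivalent to
+    // unsafe_split_out(ks, outs, self, 2, 0) for a concrete split size.
+    unsafe_split_symint_out(ks, outs, self, c10::SymInt(2), 0);
+  }
+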
+ inline at::Tensor & sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional dtype=c10::nullopt) { + return at::_ops::sum_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype, at::Tensor & out) { + return at::_ops::sum_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple std_mean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::std_mean_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1); + } + + // aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple std_mean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::std_mean_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1); + } + + // aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & prod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional dtype=c10::nullopt) { + return at::_ops::prod_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & prod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype, at::Tensor & out) { + return at::_ops::prod_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mkldnn_transpose_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::_mkldnn_transpose_out::redispatch(dispatchKeySet, self, dim0, dim1, out); + } + + // aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _mkldnn_transpose_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) { + return at::_ops::_mkldnn_transpose_out::redispatch(dispatchKeySet, self, dim0, dim1, out); + } + + // aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & flip_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::flip_out::redispatch(dispatchKeySet, self, dims, out); + } + + // aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) 
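+
+  // Illustrative sketch (hypothetical helper, not generated): each op is exposed twice
+  // with identical behaviour -- the `_out` spelling takes the output tensor first so the
+  // trailing arguments can keep their defaults, while the `_outf` spelling follows the
+  // schema order exactly and takes `out` last.
+  inline at::Tensor & example_sum_into(c10::DispatchKeySet ks, at::Tensor & out, const at::Tensor & self) {
+    // Same dispatch as sum_out(ks, out, self); dtype is left unset (nullopt).
+    return sum_outf(ks, self, c10::nullopt, out);
+  }
+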
+ inline at::Tensor & flip_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::flip_out::redispatch(dispatchKeySet, self, dims, out); + } + + // aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & roll_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(shifts), dims, out); + } + + // aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & roll_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::roll_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(shifts), dims, out); + } + + // aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & roll_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll_out::redispatch(dispatchKeySet, self, shifts, dims, out); + } + + // aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & roll_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::roll_out::redispatch(dispatchKeySet, self, shifts, dims, out); + } + + // aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rot90_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t k=1, at::IntArrayRef dims={0,1}) { + return at::_ops::rot90_out::redispatch(dispatchKeySet, self, k, dims, out); + } + + // aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rot90_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::rot90_out::redispatch(dispatchKeySet, self, k, dims, out); + } + + // aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _transform_bias_rescale_qkv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) { + return at::_ops::_transform_bias_rescale_qkv_out::redispatch(dispatchKeySet, qkv, qkv_bias, num_heads, out0, out1, out2); + } + + // aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _transform_bias_rescale_qkv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_transform_bias_rescale_qkv_out::redispatch(dispatchKeySet, qkv, qkv_bias, num_heads, out0, out1, out2); + } + + // aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _nested_tensor_from_mask_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & t, const at::Tensor & mask, bool mask_check=true) { + return at::_ops::_nested_tensor_from_mask_out::redispatch(dispatchKeySet, t, mask, mask_check, out); + } + + // aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_from_mask_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) { + return at::_ops::_nested_tensor_from_mask_out::redispatch(dispatchKeySet, t, mask, mask_check, out); + } + + // aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_from_padded_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213=false) { + return at::_ops::_nested_from_padded_out::redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213, out); + } + + // aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_from_padded_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) { + return at::_ops::_nested_from_padded_out::redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213, out); + } + + // aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_size_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_nested_tensor_size_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_size_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_nested_tensor_size_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_strides_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_nested_tensor_strides_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_strides_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_nested_tensor_strides_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_storage_offsets_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_nested_tensor_storage_offsets_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _nested_tensor_storage_offsets_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_nested_tensor_storage_offsets_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_from_padded_and_nested_example_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & padded, const at::Tensor & nt_example) { + return at::_ops::_nested_from_padded_and_nested_example_out::redispatch(dispatchKeySet, padded, nt_example, out); + } + + // aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_from_padded_and_nested_example_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) { + return at::_ops::_nested_from_padded_and_nested_example_out::redispatch(dispatchKeySet, padded, nt_example, out); + } + + // aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_view_from_buffer_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) { + return at::_ops::_nested_view_from_buffer_copy_out::redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets, out); + } + + // aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_view_from_buffer_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out) { + return at::_ops::_nested_view_from_buffer_copy_out::redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets, out); + } + + // aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_view_from_jagged_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional & lengths={}, int64_t ragged_idx=1) { + return at::_ops::_nested_view_from_jagged_copy_out::redispatch(dispatchKeySet, self, offsets, dummy, lengths, ragged_idx, out); + } + + // aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_view_from_jagged_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx, at::Tensor & out) { + return at::_ops::_nested_view_from_jagged_copy_out::redispatch(dispatchKeySet, self, offsets, dummy, lengths, ragged_idx, out); + } + + // aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _nested_get_values_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_nested_get_values_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_get_values_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_nested_get_values_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _trilinear_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1) { + return at::_ops::_trilinear_out::redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out); + } + + // aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _trilinear_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) { + return at::_ops::_trilinear_out::redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out); + } + + // aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _unique_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, bool sorted=true, bool return_inverse=false) { + return at::_ops::_unique_out::redispatch(dispatchKeySet, self, sorted, return_inverse, out0, out1); + } + + // aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _unique_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_unique_out::redispatch(dispatchKeySet, self, sorted, return_inverse, out0, out1); + } + + // aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_dim_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false) { + return at::_ops::unique_dim_out::redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts, out0, out1, out2); + } + + // aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_dim_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::unique_dim_out::redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts, out0, out1, out2); + } + + // aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_consecutive_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool return_inverse=false, bool return_counts=false, c10::optional dim=c10::nullopt) { + return at::_ops::unique_consecutive_out::redispatch(dispatchKeySet, self, return_inverse, return_counts, dim, out0, out1, out2); + } + + // aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_consecutive_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::unique_consecutive_out::redispatch(dispatchKeySet, self, return_inverse, return_counts, dim, out0, out1, out2); + } + + // aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_dim_consecutive_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool return_inverse=false, bool return_counts=false) { + return at::_ops::unique_dim_consecutive_out::redispatch(dispatchKeySet, self, dim, return_inverse, return_counts, out0, out1, out2); + } + + // aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_dim_consecutive_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::unique_dim_consecutive_out::redispatch(dispatchKeySet, self, dim, return_inverse, return_counts, out0, out1, out2); + } + + // aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _unique2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false) { + return at::_ops::_unique2_out::redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts, out0, out1, out2); + } + + // aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _unique2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_unique2_out::redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts, out0, out1, out2); + } + + // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _unsafe_view_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_unsafe_view_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _unsafe_view_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_unsafe_view_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _unsafe_view_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::_unsafe_view_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _unsafe_view_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::_unsafe_view_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple var_mean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) { + return at::_ops::var_mean_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1); + } + + // aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple var_mean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::var_mean_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1); + } + + // aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _weight_norm_interface_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) { + return at::_ops::_weight_norm_interface_out::redispatch(dispatchKeySet, v, g, dim, out0, out1); + } + + // aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _weight_norm_interface_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_weight_norm_interface_out::redispatch(dispatchKeySet, v, g, dim, out0, out1); + } + + // aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _weight_norm_interface_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) { + return at::_ops::_weight_norm_interface_backward_out::redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim, out0, out1); + } + + // aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _weight_norm_interface_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_weight_norm_interface_backward_out::redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim, out0, out1); + } + + // aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional names) { + return at::_ops::zeros_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::zeros_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _efficientzerotensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) { + return at::_ops::_efficientzerotensor_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _efficientzerotensor_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_efficientzerotensor_out::redispatch(dispatchKeySet, c10::fromIntArrayRefSlow(size), out); + } + + // aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _efficientzerotensor_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::_efficientzerotensor_out::redispatch(dispatchKeySet, size, out); + } + + // aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _efficientzerotensor_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::_efficientzerotensor_out::redispatch(dispatchKeySet, size, out); + } + + // aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
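+
+  // Illustrative sketch (hypothetical helper, not generated): ops with several outputs
+  // take one Tensor reference per output (out0, out1, ...) and hand the same references
+  // back as a tuple, so a call can be used either for its side effect or chained.
+  inline void example_weight_norm_into(c10::DispatchKeySet ks, at::Tensor & w, at::Tensor & norms, const at::Tensor & v, const at::Tensor & g) {
+    auto result = _weight_norm_interface_out(ks, w, norms, v, g);  // dim defaults to 0
+    (void)result;  // the tuple simply refers back to (w, norms)
+  }
+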
+ inline at::Tensor & zeros_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::zeros_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format, at::Tensor & out) { + return at::_ops::zeros_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _standard_gamma_grad_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & output) { + return at::_ops::_standard_gamma_grad_out::redispatch(dispatchKeySet, self, output, out); + } + + // aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _standard_gamma_grad_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output, at::Tensor & out) { + return at::_ops::_standard_gamma_grad_out::redispatch(dispatchKeySet, self, output, out); + } + + // aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _standard_gamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::_standard_gamma_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _standard_gamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional generator, at::Tensor & out) { + return at::_ops::_standard_gamma_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _dirichlet_grad_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) { + return at::_ops::_dirichlet_grad_out::redispatch(dispatchKeySet, x, alpha, total, out); + } + + // aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _dirichlet_grad_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) { + return at::_ops::_dirichlet_grad_out::redispatch(dispatchKeySet, x, alpha, total, out); + } + + // aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sample_dirichlet_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::_sample_dirichlet_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sample_dirichlet_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional generator, at::Tensor & out) { + return at::_ops::_sample_dirichlet_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & poisson_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::poisson_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & poisson_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional generator, at::Tensor & out) { + return at::_ops::poisson_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binomial_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & count, const at::Tensor & prob, c10::optional generator=c10::nullopt) { + return at::_ops::binomial_out::redispatch(dispatchKeySet, count, prob, generator, out); + } + + // aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binomial_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional generator, at::Tensor & out) { + return at::_ops::binomial_out::redispatch(dispatchKeySet, count, prob, generator, out); + } + + // aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & p=2) { + return at::_ops::native_norm_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) { + return at::_ops::native_norm_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, c10::optional dtype) { + return at::_ops::native_norm_ScalarOpt_dim_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out); + } + + // aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::native_norm_ScalarOpt_dim_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out); + } + + // aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::_sparse_sum_dim_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) 
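+
+  // Illustrative sketch (hypothetical helper, not generated): distinct schema overloads
+  // keep a single C++ name and are told apart by their parameter lists -- the two
+  // native_norm_out overloads above redispatch to at::_ops::native_norm_out and
+  // at::_ops::native_norm_ScalarOpt_dim_dtype_out respectively.
+  inline at::Tensor & example_native_norm_into(c10::DispatchKeySet ks, at::Tensor & out, const at::Tensor & self) {
+    // Resolves to the plain Scalar overload; p defaults to 2.
+    return native_norm_out(ks, out, self);
+  }
+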
+ inline at::Tensor & _sparse_sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { + return at::_ops::_sparse_sum_dim_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_sum_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::_sparse_sum_backward_out::redispatch(dispatchKeySet, grad, self, dim, out); + } + + // aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_sum_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { + return at::_ops::_sparse_sum_backward_out::redispatch(dispatchKeySet, grad, self, dim, out); + } + + // aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_csr_sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_csr_sum_dim_dtype_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_csr_sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::_sparse_csr_sum_dim_dtype_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_csr_prod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::_sparse_csr_prod_dim_dtype_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_csr_prod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::_sparse_csr_prod_dim_dtype_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_sparse_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _sparse_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { + return at::_ops::_sparse_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_softmax_backward_data_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { + return at::_ops::_sparse_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, self, out); + } + + // aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_softmax_backward_data_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_sparse_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, self, out); + } + + // aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_log_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_sparse_log_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_log_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { + return at::_ops::_sparse_log_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_log_softmax_backward_data_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { + return at::_ops::_sparse_log_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, self, out); + } + + // aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_log_softmax_backward_data_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_sparse_log_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, self, out); + } + + // aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _spdiags_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional layout=c10::nullopt) { + return at::_ops::_spdiags_out::redispatch(dispatchKeySet, diagonals, offsets, shape, layout, out); + } + + // aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _spdiags_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional layout, at::Tensor & out) { + return at::_ops::_spdiags_out::redispatch(dispatchKeySet, diagonals, offsets, shape, layout, out); + } + + // aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::ScalarType dtype) { + return at::_ops::norm_ScalarOpt_dtype_out::redispatch(dispatchKeySet, self, p, dtype, out); + } + + // aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::norm_ScalarOpt_dtype_out::redispatch(dispatchKeySet, self, p, dtype, out); + } + + // aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & p=2) { + return at::_ops::norm_Scalar_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) { + return at::_ops::norm_Scalar_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clone_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::clone_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clone_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format, at::Tensor & out) { + return at::_ops::clone_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_as_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, const at::Tensor & the_template, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_as_out::redispatch(dispatchKeySet, self, the_template, memory_format, out); + } + + // aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_as_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional memory_format, const at::Tensor & out) { + return at::_ops::resize_as_out::redispatch(dispatchKeySet, self, the_template, memory_format, out); + } + + // aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor resize_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_as::redispatch(dispatchKeySet, self, the_template, memory_format); + } + + // aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_as_sparse_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, const at::Tensor & the_template) { + return at::_ops::resize_as_sparse_out::redispatch(dispatchKeySet, self, the_template, out); + } + + // aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_as_sparse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) { + return at::_ops::resize_as_sparse_out::redispatch(dispatchKeySet, self, the_template, out); + } + + // aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor + inline at::Tensor resize_as_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) { + return at::_ops::resize_as_sparse::redispatch(dispatchKeySet, self, the_template); + } + + // aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zero_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::zero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zero_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::zero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::zero(Tensor self) -> Tensor + inline at::Tensor zero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::zero::redispatch(dispatchKeySet, self); + } + + // aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sub_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::sub_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sub_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sub_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsub_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Tensor_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
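+
+  // Illustrative sketch (hypothetical helper, not generated): for resize-style ops the
+  // output parameter is declared const at::Tensor & and the const reference is returned.
+  // Constness on at::Tensor is shallow -- the wrapper never rebinds the handle, it only
+  // mutates the tensor it refers to, so a const reference is sufficient here.
+  inline const at::Tensor & example_resize_as_into(c10::DispatchKeySet ks, const at::Tensor & out, const at::Tensor & self, const at::Tensor & like) {
+    return resize_as_out(ks, out, self, like);  // memory_format stays nullopt
+  }
+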
+ inline at::Tensor & rsub_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::rsub_Tensor_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsub_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsub_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::rsub_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_addmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::_sparse_addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_addmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::_sparse_addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_coo_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) { + return at::_ops::sparse_coo_tensor_size_out::redispatch(dispatchKeySet, size, out); + } + + // aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_coo_tensor_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::sparse_coo_tensor_size_out::redispatch(dispatchKeySet, size, out); + } + + // aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size) { + return at::_ops::_sparse_coo_tensor_with_dims_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, out); + } + + // aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_outf(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_sparse_coo_tensor_with_dims_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, out); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional is_coalesced, at::Tensor & out) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, is_coalesced, out); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_outf(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional is_coalesced, at::Tensor & out) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, is_coalesced, out); + } + + // aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & sparse_resize_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_out::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out); + } + + // aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) 
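+
+  // Illustrative sketch (hypothetical helper, not generated): where a schema takes
+  // SymInt[] there is a single underlying op; the IntArrayRef overloads above convert
+  // concrete sizes with c10::fromIntArrayRefSlow before redispatching, while the
+  // `_symint` overloads pass a c10::SymIntArrayRef through untouched.
+  inline at::Tensor & example_sparse_coo_with_dims_into(c10::DispatchKeySet ks, at::Tensor & out, const at::Tensor & indices, const at::Tensor & values) {
+    // 1 sparse dim, 1 dense dim, concrete size {4, 3}; is_coalesced is left unset.
+    return _sparse_coo_tensor_with_dims_and_tensors_out(ks, out, 1, 1, {4, 3}, indices, values);
+  }
+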
+ inline const at::Tensor & sparse_resize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) { + return at::_ops::sparse_resize_out::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out); + } + + // aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor + inline at::Tensor sparse_resize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim); + } + + // aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & sparse_resize_and_clear_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_and_clear_out::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out); + } + + // aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & sparse_resize_and_clear_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) { + return at::_ops::sparse_resize_and_clear_out::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out); + } + + // aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor + inline at::Tensor sparse_resize_and_clear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_and_clear::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim); + } + + // aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_mask_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::sparse_mask_out::redispatch(dispatchKeySet, self, mask, out); + } + + // aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_mask_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { + return at::_ops::sparse_mask_out::redispatch(dispatchKeySet, self, mask, out); + } + + // aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_mask_projection_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches=false) { + return at::_ops::_sparse_mask_projection_out::redispatch(dispatchKeySet, self, mask, accumulate_matches, out); + } + + // aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_mask_projection_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out) { + return at::_ops::_sparse_mask_projection_out::redispatch(dispatchKeySet, self, mask, accumulate_matches, out); + } + + // aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & _to_dense_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt) { + return at::_ops::_to_dense_out::redispatch(dispatchKeySet, self, dtype, masked_grad, out); + } + + // aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_dense_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad, at::Tensor & out) { + return at::_ops::_to_dense_out::redispatch(dispatchKeySet, self, dtype, masked_grad, out); + } + + // aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _coalesce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_coalesce_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _coalesce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_coalesce_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _coalesced_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool coalesced) { + return at::_ops::_coalesced_out::redispatch(dispatchKeySet, self, coalesced, out); + } + + // aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _coalesced_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced, at::Tensor & out) { + return at::_ops::_coalesced_out::redispatch(dispatchKeySet, self, coalesced, out); + } + + // aten::_coalesced(Tensor self, bool coalesced) -> Tensor + inline at::Tensor _coalesced(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced) { + return at::_ops::_coalesced::redispatch(dispatchKeySet, self, coalesced); + } + + // aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copy_sparse_to_sparse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_sparse_to_sparse_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copy_sparse_to_sparse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) { + return at::_ops::copy_sparse_to_sparse_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor + inline at::Tensor copy_sparse_to_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_sparse_to_sparse::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _to_sparse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t sparse_dim) { + return at::_ops::_to_sparse_sparse_dim_out::redispatch(dispatchKeySet, self, sparse_dim, out); + } + + // aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) { + return at::_ops::_to_sparse_sparse_dim_out::redispatch(dispatchKeySet, self, sparse_dim, out); + } + + // aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt) { + return at::_ops::_to_sparse_out::redispatch(dispatchKeySet, self, layout, blocksize, dense_dim, out); + } + + // aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) { + return at::_ops::_to_sparse_out::redispatch(dispatchKeySet, self, layout, blocksize, dense_dim, out); + } + + // aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_csr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) { + return at::_ops::_to_sparse_csr_out::redispatch(dispatchKeySet, self, dense_dim, out); + } + + // aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_csr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) { + return at::_ops::_to_sparse_csr_out::redispatch(dispatchKeySet, self, dense_dim, out); + } + + // aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_csc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) { + return at::_ops::_to_sparse_csc_out::redispatch(dispatchKeySet, self, dense_dim, out); + } + + // aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_csc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) { + return at::_ops::_to_sparse_csc_out::redispatch(dispatchKeySet, self, dense_dim, out); + } + + // aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_bsr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt) { + return at::_ops::_to_sparse_bsr_out::redispatch(dispatchKeySet, self, blocksize, dense_dim, out); + } + + // aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _to_sparse_bsr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) { + return at::_ops::_to_sparse_bsr_out::redispatch(dispatchKeySet, self, blocksize, dense_dim, out); + } + + // aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_bsc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt) { + return at::_ops::_to_sparse_bsc_out::redispatch(dispatchKeySet, self, blocksize, dense_dim, out); + } + + // aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_sparse_bsc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) { + return at::_ops::_to_sparse_bsc_out::redispatch(dispatchKeySet, self, blocksize, dense_dim, out); + } + + // aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_mkldnn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::to_mkldnn_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_mkldnn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::to_mkldnn_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv2d_weight_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt, out); + } + + // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv2d_weight_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt, out); + } + + // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
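+  // NOTE [ SymInt wrappers ] -- editorial sketch: for schemas declared with SymInt arguments (such as
+  // mkldnn_reorder_conv2d_weight.out above), the plain at::IntArrayRef overloads convert their sizes with
+  // c10::fromIntArrayRefSlow(...) before redispatching, while the parallel `_symint_out` / `_symint_outf`
+  // overloads that follow pass c10::SymIntArrayRef through unchanged, so callers that already hold
+  // symbolic shapes can avoid the conversion.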
+ inline at::Tensor & mkldnn_reorder_conv2d_weight_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size, out); + } + + // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv2d_weight_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size, out); + } + + // aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv3d_weight_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::mkldnn_reorder_conv3d_weight_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv3d_weight_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv3d_weight_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out); + } + + // aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv3d_weight_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) { + return at::_ops::mkldnn_reorder_conv3d_weight_out::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, out); + } + + // aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & mkldnn_reorder_conv3d_weight_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv3d_weight_out::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, out); + } + + // aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_dynamic_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::ScalarType dtype, bool reduce_range) { + return at::_ops::quantize_per_tensor_dynamic_out::redispatch(dispatchKeySet, self, dtype, reduce_range, out); + } + + // aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_dynamic_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) { + return at::_ops::quantize_per_tensor_dynamic_out::redispatch(dispatchKeySet, self, dtype, reduce_range, out); + } + + // aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, dtype, out); + } + + // aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, dtype, out); + } + + // aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensor_qparams_out::redispatch(dispatchKeySet, self, scale, zero_point, dtype, out); + } + + // aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & quantize_per_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_tensor_tensor_qparams_out::redispatch(dispatchKeySet, self, scale, zero_point, dtype, out); + } + + // aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () + inline void quantize_per_tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensors_out::redispatch(dispatchKeySet, tensors, scales, zero_points, dtype, out); + } + + // aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () + inline void quantize_per_tensor_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { + return at::_ops::quantize_per_tensor_tensors_out::redispatch(dispatchKeySet, tensors, scales, zero_points, dtype, out); + } + + // aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_channel_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) { + return at::_ops::quantize_per_channel_out::redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype, out); + } + + // aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_channel_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_channel_out::redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype, out); + } + + // aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dequantize_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::dequantize_self_out::redispatch(dispatchKeySet, self, out); + } + + // aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dequantize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::dequantize_self_out::redispatch(dispatchKeySet, self, out); + } + + // aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () + inline void dequantize_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList tensors) { + return at::_ops::dequantize_tensors_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () + inline void dequantize_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::TensorList out) { + return at::_ops::dequantize_tensors_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
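+  // NOTE [ quantize_per_tensor_out overloads ] -- editorial sketch: the quantize_per_tensor_out /
+  // quantize_per_tensor_outf helpers above are distinguished purely by argument types during overload
+  // resolution: a (double scale, int64_t zero_point) pair for the plain .out schema, a pair of Tensor
+  // qparams for .tensor_qparams_out, and TensorList in/out arguments (returning void) for .tensors_out.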
+ inline at::Tensor & q_per_channel_scales_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::q_per_channel_scales_out::redispatch(dispatchKeySet, self, out); + } + + // aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & q_per_channel_scales_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::q_per_channel_scales_out::redispatch(dispatchKeySet, self, out); + } + + // aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & q_per_channel_zero_points_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::q_per_channel_zero_points_out::redispatch(dispatchKeySet, self, out); + } + + // aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & q_per_channel_zero_points_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::q_per_channel_zero_points_out::redispatch(dispatchKeySet, self, out); + } + + // aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & int_repr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::int_repr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & int_repr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::int_repr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _make_per_tensor_quantized_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point) { + return at::_ops::_make_per_tensor_quantized_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, out); + } + + // aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _make_per_tensor_quantized_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) { + return at::_ops::_make_per_tensor_quantized_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, out); + } + + // aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _make_per_channel_quantized_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) { + return at::_ops::_make_per_channel_quantized_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, out); + } + + // aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _make_per_channel_quantized_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) { + return at::_ops::_make_per_channel_quantized_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, out); + } + + // aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, out0, out1); + } + + // aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, out0, out1); + } + + // aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) { + return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1); + } + + // aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1); + } + + // aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _fake_quantize_learnable_per_tensor_affine_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_tensor_affine_out::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor, out); + } + + // aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fake_quantize_learnable_per_tensor_affine_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) { + return at::_ops::_fake_quantize_learnable_per_tensor_affine_out::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor, out); + } + + // aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_channel_affine_cachemask_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, out0, out1); + } + + // aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::fake_quantize_per_channel_affine_cachemask_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, out0, out1); + } + + // aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _fake_quantize_learnable_per_channel_affine_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_channel_affine_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out); + } + + // aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _fake_quantize_learnable_per_channel_affine_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) { + return at::_ops::_fake_quantize_learnable_per_channel_affine_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out); + } + + // aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!)) + inline ::std::tuple _fused_moving_avg_obs_fq_helper_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::_fused_moving_avg_obs_fq_helper_out::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1); + } + + // aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) 
out1) -> (Tensor(e!), Tensor(f!)) + inline ::std::tuple _fused_moving_avg_obs_fq_helper_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_fused_moving_avg_obs_fq_helper_out::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1); + } + + // aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out) + inline ::std::tuple _fused_moving_avg_obs_fq_helper_functional(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::_fused_moving_avg_obs_fq_helper_functional::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + } + + // aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool non_blocking=false, c10::optional memory_format=c10::nullopt) { + return at::_ops::_to_copy_out::redispatch(dispatchKeySet, self, non_blocking, memory_format, out); + } + + // aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking, c10::optional memory_format, at::Tensor & out) { + return at::_ops::_to_copy_out::redispatch(dispatchKeySet, self, non_blocking, memory_format, out); + } + + // aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) 
out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!)) + inline ::std::tuple _lstm_mps_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::_lstm_mps_out::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4, out5); + } + + // aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!)) + inline ::std::tuple _lstm_mps_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5) { + return at::_ops::_lstm_mps_out::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4, out5); + } + + // aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () + inline void lstm_mps_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::TensorList out1, at::TensorList out2, const c10::optional & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_mps_backward_out::redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2); + } + + // aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) 
out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () + inline void lstm_mps_backward_outf(c10::DispatchKeySet dispatchKeySet, const c10::optional & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) { + return at::_ops::lstm_mps_backward_out::redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2); + } + + // aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _thnn_fused_lstm_cell_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional & input_bias={}, const c10::optional & hidden_bias={}) { + return at::_ops::_thnn_fused_lstm_cell_out::redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2); + } + + // aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _thnn_fused_lstm_cell_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional & input_bias, const c10::optional & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_thnn_fused_lstm_cell_out::redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2); + } + + // aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _thnn_fused_lstm_cell_backward_impl_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2); + } + + // aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _thnn_fused_lstm_cell_backward_impl_outf(c10::DispatchKeySet dispatchKeySet, const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2); + } + + // aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _thnn_fused_gru_cell_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional & input_bias={}, const c10::optional & hidden_bias={}) { + return at::_ops::_thnn_fused_gru_cell_out::redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1); + } + + // aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _thnn_fused_gru_cell_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_thnn_fused_gru_cell_out::redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1); + } + + // aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple _thnn_fused_gru_cell_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_gru_cell_backward_out::redispatch(dispatchKeySet, grad_hy, workspace, has_bias, out0, out1, out2, out3, out4); + } + + // aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple _thnn_fused_gru_cell_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { + return at::_ops::_thnn_fused_gru_cell_backward_out::redispatch(dispatchKeySet, grad_hy, workspace, has_bias, out0, out1, out2, out3, out4); + } + + // aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _pack_padded_sequence_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & lengths, bool batch_first) { + return at::_ops::_pack_padded_sequence_out::redispatch(dispatchKeySet, input, lengths, batch_first, out0, out1); + } + + // aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _pack_padded_sequence_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_pack_padded_sequence_out::redispatch(dispatchKeySet, input, lengths, batch_first, out0, out1); + } + + // aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Storage source) { + return at::_ops::set_source_Storage_out::redispatch(dispatchKeySet, self, source, out); + } + + // aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, at::Tensor & out) { + return at::_ops::set_source_Storage_out::redispatch(dispatchKeySet, self, source, out); + } + + // aten::set.source_Storage(Tensor self, Storage source) -> Tensor + inline at::Tensor set(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source) { + return at::_ops::set_source_Storage::redispatch(dispatchKeySet, self, source); + } + + // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) { + return at::_ops::set_source_Storage_storage_offset_out::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::set_source_Storage_storage_offset_out::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) { + return at::_ops::set_source_Storage_storage_offset_out::redispatch(dispatchKeySet, self, source, storage_offset, size, stride, out); + } + + // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & set_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { + return at::_ops::set_source_Storage_storage_offset_out::redispatch(dispatchKeySet, self, source, storage_offset, size, stride, out); + } + + // aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor + inline at::Tensor set(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) { + return at::_ops::set_source_Storage_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); + } + + // aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor + inline at::Tensor set_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) { + return at::_ops::set_source_Storage_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, size, stride); + } + + // aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & source) { + return at::_ops::set_source_Tensor_out::redispatch(dispatchKeySet, self, source, out); + } + + // aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source, at::Tensor & out) { + return at::_ops::set_source_Tensor_out::redispatch(dispatchKeySet, self, source, out); + } + + // aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor + inline at::Tensor set(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source) { + return at::_ops::set_source_Tensor::redispatch(dispatchKeySet, self, source); + } + + // aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::set_out::redispatch(dispatchKeySet, self, out); + } + + // aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & set_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::set_out::redispatch(dispatchKeySet, self, out); + } + + // aten::set(Tensor self) -> Tensor + inline at::Tensor set(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::set::redispatch(dispatchKeySet, self); + } + + // aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::lift_out::redispatch(dispatchKeySet, self, out); + } + + // aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::lift_out::redispatch(dispatchKeySet, self, out); + } + + // aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & lift_fresh_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::lift_fresh_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lift_fresh_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::lift_fresh_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & masked_fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + return at::_ops::masked_fill_Scalar_out::redispatch(dispatchKeySet, self, mask, value, out); + } + + // aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & masked_fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) { + return at::_ops::masked_fill_Scalar_out::redispatch(dispatchKeySet, self, mask, value, out); + } + + // aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & masked_fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + return at::_ops::masked_fill_Tensor_out::redispatch(dispatchKeySet, self, mask, value, out); + } + + // aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & masked_fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) { + return at::_ops::masked_fill_Tensor_out::redispatch(dispatchKeySet, self, mask, value, out); + } + + // aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & masked_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + return at::_ops::masked_scatter_out::redispatch(dispatchKeySet, self, mask, source, out); + } + + // aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & masked_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) { + return at::_ops::masked_scatter_out::redispatch(dispatchKeySet, self, mask, source, out); + } + + // aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _masked_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> mask_type=c10::nullopt) { + return at::_ops::_masked_softmax_out::redispatch(dispatchKeySet, self, mask, dim, mask_type, out); + } + + // aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _masked_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, c10::optional dim, c10::optional mask_type, at::Tensor & out) { + return at::_ops::_masked_softmax_out::redispatch(dispatchKeySet, self, mask, dim, mask_type, out); + } + + // aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _masked_softmax_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional dim=c10::nullopt) { + return at::_ops::_masked_softmax_backward_out::redispatch(dispatchKeySet, grad_output, output, mask, dim, out); + } + + // aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _masked_softmax_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional dim, at::Tensor & out) { + return at::_ops::_masked_softmax_backward_out::redispatch(dispatchKeySet, grad_output, output, mask, dim, out); + } + + // aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & put_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false) { + return at::_ops::put_out::redispatch(dispatchKeySet, self, index, source, accumulate, out); + } + + // aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & put_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) { + return at::_ops::put_out::redispatch(dispatchKeySet, self, index, source, accumulate, out); + } + + // aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar_out::redispatch(dispatchKeySet, self, dim, index, value, out); + } + + // aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { + return at::_ops::index_fill_int_Scalar_out::redispatch(dispatchKeySet, self, dim, index, value, out); + } + + // aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor_out::redispatch(dispatchKeySet, self, dim, index, value, out); + } + + // aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & index_fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) { + return at::_ops::index_fill_int_Tensor_out::redispatch(dispatchKeySet, self, dim, index, value, out); + } + + // aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_and_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_and_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_or_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_or_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_xor_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_xor_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & __lshift___out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__lshift___Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & __lshift___outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::__lshift___Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & __lshift___out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__lshift___Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & __lshift___outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::__lshift___Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_left_shift_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & __rshift___out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__rshift___Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & __rshift___outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::__rshift___Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & __rshift___out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__rshift___Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & __rshift___outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::__rshift___Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_right_shift_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & random_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t from, c10::optional to, c10::optional generator=c10::nullopt) { + return at::_ops::random_from_out::redispatch(dispatchKeySet, self, from, to, generator, out); + } + + // aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & random_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, c10::optional to, c10::optional generator, at::Tensor & out) { + return at::_ops::random_from_out::redispatch(dispatchKeySet, self, from, to, generator, out); + } + + // aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor + inline at::Tensor random(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, c10::optional to, c10::optional generator=c10::nullopt) { + return at::_ops::random_from::redispatch(dispatchKeySet, self, from, to, generator); + } + + // aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & random_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t to, c10::optional generator=c10::nullopt) { + return at::_ops::random_to_out::redispatch(dispatchKeySet, self, to, generator, out); + } + + // aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & random_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, c10::optional generator, at::Tensor & out) { + return at::_ops::random_to_out::redispatch(dispatchKeySet, self, to, generator, out); + } + + // aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor + inline at::Tensor random(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, c10::optional generator=c10::nullopt) { + return at::_ops::random_to::redispatch(dispatchKeySet, self, to, generator); + } + + // aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & random_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::random_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & random_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional generator, at::Tensor & out) { + return at::_ops::random_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::random(Tensor self, *, Generator? generator=None) -> Tensor + inline at::Tensor random(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::random::redispatch(dispatchKeySet, self, generator); + } + + // aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & uniform_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double from=0, double to=1, c10::optional generator=c10::nullopt) { + return at::_ops::uniform_out::redispatch(dispatchKeySet, self, from, to, generator, out); + } + + // aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & uniform_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, c10::optional generator, at::Tensor & out) { + return at::_ops::uniform_out::redispatch(dispatchKeySet, self, from, to, generator, out); + } + + // aten::uniform(Tensor self, float from=0, float to=1, *, Generator? 
generator=None) -> Tensor + inline at::Tensor uniform(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from=0, double to=1, c10::optional generator=c10::nullopt) { + return at::_ops::uniform::redispatch(dispatchKeySet, self, from, to, generator); + } + + // aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cauchy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double median=0, double sigma=1, c10::optional generator=c10::nullopt) { + return at::_ops::cauchy_out::redispatch(dispatchKeySet, self, median, sigma, generator, out); + } + + // aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cauchy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median, double sigma, c10::optional generator, at::Tensor & out) { + return at::_ops::cauchy_out::redispatch(dispatchKeySet, self, median, sigma, generator, out); + } + + // aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor + inline at::Tensor cauchy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median=0, double sigma=1, c10::optional generator=c10::nullopt) { + return at::_ops::cauchy::redispatch(dispatchKeySet, self, median, sigma, generator); + } + + // aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log_normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double mean=1, double std=2, c10::optional generator=c10::nullopt) { + return at::_ops::log_normal_out::redispatch(dispatchKeySet, self, mean, std, generator, out); + } + + // aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log_normal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional generator, at::Tensor & out) { + return at::_ops::log_normal_out::redispatch(dispatchKeySet, self, mean, std, generator, out); + } + + // aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor + inline at::Tensor log_normal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean=1, double std=2, c10::optional generator=c10::nullopt) { + return at::_ops::log_normal::redispatch(dispatchKeySet, self, mean, std, generator); + } + + // aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exponential_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double lambd=1, c10::optional generator=c10::nullopt) { + return at::_ops::exponential_out::redispatch(dispatchKeySet, self, lambd, generator, out); + } + + // aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exponential_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional generator, at::Tensor & out) { + return at::_ops::exponential_out::redispatch(dispatchKeySet, self, lambd, generator, out); + } + + // aten::exponential(Tensor self, float lambd=1, *, Generator? 
generator=None) -> Tensor + inline at::Tensor exponential(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd=1, c10::optional generator=c10::nullopt) { + return at::_ops::exponential::redispatch(dispatchKeySet, self, lambd, generator); + } + + // aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & geometric_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double p, c10::optional generator=c10::nullopt) { + return at::_ops::geometric_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & geometric_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional generator, at::Tensor & out) { + return at::_ops::geometric_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor + inline at::Tensor geometric(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional generator=c10::nullopt) { + return at::_ops::geometric::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tril_indices_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t row, int64_t col, int64_t offset=0) { + return at::_ops::tril_indices_out::redispatch(dispatchKeySet, row, col, offset, out); + } + + // aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tril_indices_outf(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) { + return at::_ops::tril_indices_out::redispatch(dispatchKeySet, row, col, offset, out); + } + + // aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & triu_indices_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t row, int64_t col, int64_t offset=0) { + return at::_ops::triu_indices_out::redispatch(dispatchKeySet, row, col, offset, out); + } + + // aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & triu_indices_outf(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) { + return at::_ops::triu_indices_out::redispatch(dispatchKeySet, row, col, offset, out); + } + + // aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & trace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::trace_out::redispatch(dispatchKeySet, self, out); + } + + // aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & trace_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::trace_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _cholesky_solve_helper_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & A, bool upper) { + return at::_ops::_cholesky_solve_helper_out::redispatch(dispatchKeySet, self, A, upper, out); + } + + // aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cholesky_solve_helper_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) { + return at::_ops::_cholesky_solve_helper_out::redispatch(dispatchKeySet, self, A, upper, out); + } + + // aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p=2) { + return at::_ops::dist_out::redispatch(dispatchKeySet, self, other, p, out); + } + + // aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) { + return at::_ops::dist_out::redispatch(dispatchKeySet, self, other, p, out); + } + + // aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> () + inline void _histogramdd_bin_edges_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::_histogramdd_bin_edges_out::redispatch(dispatchKeySet, self, bins, range, weight, density, out); + } + + // aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> () + inline void _histogramdd_bin_edges_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) { + return at::_ops::_histogramdd_bin_edges_out::redispatch(dispatchKeySet, self, bins, range, weight, density, out); + } + + // aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _histogramdd_from_bin_cts_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::_histogramdd_from_bin_cts_out::redispatch(dispatchKeySet, self, bins, range, weight, density, out); + } + + // aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _histogramdd_from_bin_cts_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) { + return at::_ops::_histogramdd_from_bin_cts_out::redispatch(dispatchKeySet, self, bins, range, weight, density, out); + } + + // aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _histogramdd_from_bin_tensors_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::TensorList bins, const c10::optional & weight={}, bool density=false) { + return at::_ops::_histogramdd_from_bin_tensors_out::redispatch(dispatchKeySet, self, bins, weight, density, out); + } + + // aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _histogramdd_from_bin_tensors_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional & weight, bool density, at::Tensor & out) { + return at::_ops::_histogramdd_from_bin_tensors_out::redispatch(dispatchKeySet, self, bins, weight, density, out); + } + + // aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & remainder_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::remainder_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & remainder_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::remainder_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & argsort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false) { + return at::_ops::argsort_stable_out::redispatch(dispatchKeySet, self, stable, dim, descending, out); + } + + // aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & argsort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) { + return at::_ops::argsort_stable_out::redispatch(dispatchKeySet, self, stable, dim, descending, out); + } + + // aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & unfold_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward_out::redispatch(dispatchKeySet, grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out); + } + + // aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & unfold_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) { + return at::_ops::unfold_backward_out::redispatch(dispatchKeySet, grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out); + } + + // aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & unfold_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward_out::redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step, out); + } + + // aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & unfold_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) { + return at::_ops::unfold_backward_out::redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step, out); + } + + // aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double mean=0, double std=1, c10::optional generator=c10::nullopt) { + return at::_ops::normal_out::redispatch(dispatchKeySet, self, mean, std, generator, out); + } + + // aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional generator, at::Tensor & out) { + return at::_ops::normal_out::redispatch(dispatchKeySet, self, mean, std, generator, out); + } + + // aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> () + inline void _amp_foreach_non_finite_check_and_unscale_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::redispatch(dispatchKeySet, self, found_inf, inv_scale, out); + } + + // aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> () + inline void _amp_foreach_non_finite_check_and_unscale_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::redispatch(dispatchKeySet, self, found_inf, inv_scale, out); + } + + // aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out) + inline ::std::tuple<::std::vector,at::Tensor> _amp_foreach_non_finite_check_and_unscale(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale::redispatch(dispatchKeySet, self, found_inf, inv_scale); + } + + // aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _amp_update_scale_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { + return at::_ops::_amp_update_scale_out::redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out); + } + + // aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _amp_update_scale_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) { + return at::_ops::_amp_update_scale_out::redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out); + } + + // aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out) + inline ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { + return at::_ops::_amp_update_scale::redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); + } + + // aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_add_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_add_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_add_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_add_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_List_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_add_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { + return at::_ops::_foreach_add_List_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_add_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_add_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void
_foreach_add_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_add_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_add_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_Tensor_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_add_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) { + return at::_ops::_foreach_add_Tensor_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_sub_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_sub_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_sub_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_sub_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_sub_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_sub_List_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_sub_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { + return at::_ops::_foreach_sub_List_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_sub_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_sub_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_sub_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_sub_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_mul_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & 
scalar, at::TensorList out) { + return at::_ops::_foreach_mul_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_mul_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_mul_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_mul_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_mul_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Tensor & other) { + return at::_ops::_foreach_mul_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out) { + return at::_ops::_foreach_mul_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_div_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_div_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_div_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_div_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_div_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_div_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_div_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_div_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_div_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, 
at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_div_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_div_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_div_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> () + inline void _foreach_div_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Tensor & other) { + return at::_ops::_foreach_div_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> () + inline void _foreach_div_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out) { + return at::_ops::_foreach_div_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_max_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_max_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_max_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_clamp_max_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_max_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_clamp_max_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_max_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_clamp_max_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_max_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_max_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_max_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_clamp_max_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_min_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_min_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // 
aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_min_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_clamp_min_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_min_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_clamp_min_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_min_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_clamp_min_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_min_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_min_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_clamp_min_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_clamp_min_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_maximum_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_maximum_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_maximum_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_maximum_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_maximum_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_maximum_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_maximum_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_maximum_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_maximum_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_maximum_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_maximum_outf(c10::DispatchKeySet 
dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_maximum_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_minimum_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_minimum_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_minimum_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_minimum_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_minimum_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_minimum_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_minimum_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_minimum_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_minimum_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_minimum_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_minimum_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_minimum_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcdiv_Scalar_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + return at::_ops::_foreach_addcdiv_Scalar_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcdiv_ScalarList_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // 
aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_addcdiv_ScalarList_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcdiv_Tensor_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { + return at::_ops::_foreach_addcdiv_Tensor_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcmul_Scalar_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + return at::_ops::_foreach_addcmul_Scalar_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcmul_ScalarList_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_addcmul_ScalarList_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcmul_Tensor_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // 
aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { + return at::_ops::_foreach_addcmul_Tensor_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_abs_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_abs_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_abs_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_abs_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_acos_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_acos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_acos_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_acos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_asin_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_asin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_asin_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_asin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_atan_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_atan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_atan_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_atan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_ceil_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_ceil_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_ceil_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_ceil_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_cos_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_cos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_cos_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return 
at::_ops::_foreach_cos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_cosh_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_cosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_cosh_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_cosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_erf_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_erf_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_erfc_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_erfc_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_exp_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_exp_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_expm1_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_expm1_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_floor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_floor_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_floor_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_floor_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_frac_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_frac_out::redispatch(dispatchKeySet, self, out); + } + + // 
aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_frac_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_frac_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> () + inline void _foreach_lerp_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensors1, at::TensorList weights) { + return at::_ops::_foreach_lerp_List_out::redispatch(dispatchKeySet, self, tensors1, weights, out); + } + + // aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> () + inline void _foreach_lerp_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) { + return at::_ops::_foreach_lerp_List_out::redispatch(dispatchKeySet, self, tensors1, weights, out); + } + + // aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> () + inline void _foreach_lerp_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) { + return at::_ops::_foreach_lerp_Scalar_out::redispatch(dispatchKeySet, self, tensors1, weight, out); + } + + // aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> () + inline void _foreach_lerp_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) { + return at::_ops::_foreach_lerp_Scalar_out::redispatch(dispatchKeySet, self, tensors1, weight, out); + } + + // aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_lgamma_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_lgamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_lgamma_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_lgamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log10_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log10_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log10_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log10_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log1p_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, 
at::TensorList self) { + return at::_ops::_foreach_log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log1p_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log2_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log2_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_neg_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_neg_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_neg_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_neg_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> () + inline void _foreach_norm_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & ord=2) { + return at::_ops::_foreach_norm_Scalar_out::redispatch(dispatchKeySet, self, ord, out); + } + + // aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> () + inline void _foreach_norm_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, at::TensorList out) { + return at::_ops::_foreach_norm_Scalar_out::redispatch(dispatchKeySet, self, ord, out); + } + + // aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> () + inline void _foreach_pow_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList exponent) { + return at::_ops::_foreach_pow_List_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> () + inline void _foreach_pow_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent, at::TensorList out) { + return at::_ops::_foreach_pow_List_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> () + inline void _foreach_pow_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & exponent) { + return at::_ops::_foreach_pow_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> () + inline void _foreach_pow_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent, at::TensorList out) { + return at::_ops::_foreach_pow_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> () + inline void _foreach_pow_out(c10::DispatchKeySet 
dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef exponent) { + return at::_ops::_foreach_pow_ScalarList_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> () + inline void _foreach_pow_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef exponent, at::TensorList out) { + return at::_ops::_foreach_pow_ScalarList_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_reciprocal_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_reciprocal_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_reciprocal_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_reciprocal_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_round_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_round_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_round_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_round_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sigmoid_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sigmoid_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sign_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sign_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sign_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sign_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sin_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sin_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sinh_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sinh.out(Tensor[] self, *, 
Tensor(a!)[] out) -> () + inline void _foreach_sinh_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sqrt_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sqrt_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sqrt_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sqrt_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_tan_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_tan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_tan_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_tan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_tanh_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_tanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_tanh_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_tanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_trunc_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_trunc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_trunc_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_trunc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_zero_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_zero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_zero_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_zero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out + inline ::std::vector _foreach_zero(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_zero::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> () + inline void _foreach_copy_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList src, bool non_blocking=false) { + return at::_ops::_foreach_copy_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool 
non_blocking=False, *, Tensor(a!)[] out) -> () + inline void _foreach_copy_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out) { + return at::_ops::_foreach_copy_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out + inline ::std::vector _foreach_copy(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking=false) { + return at::_ops::_foreach_copy::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bucketize_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) { + return at::_ops::bucketize_Scalar_out::redispatch(dispatchKeySet, self, boundaries, out_int32, right, out); + } + + // aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bucketize_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) { + return at::_ops::bucketize_Scalar_out::redispatch(dispatchKeySet, self, boundaries, out_int32, right, out); + } + + // aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_jvp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_jvp_out::redispatch(dispatchKeySet, glu, x, dx, dim, out); + } + + // aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_jvp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) { + return at::_ops::glu_jvp_out::redispatch(dispatchKeySet, glu, x, dx, dim, out); + } + + // aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_backward_jvp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_backward_jvp_out::redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim, out); + } + + // aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_backward_jvp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) { + return at::_ops::glu_backward_jvp_out::redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim, out); + } + + // aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
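+ // [Illustrative note, not part of the generated file] Every schema in this
+ // header is exposed through two wrappers: the `*_out` overload takes the
+ // output tensor(s) first so trailing arguments can keep their defaults, and
+ // the `*_outf` overload mirrors the schema order with `out` last. Both
+ // forward to the same at::_ops::<op>::redispatch entry using the explicit
+ // c10::DispatchKeySet supplied by the caller, which (as I understand it)
+ // continues dispatch from that key set rather than recomputing it. A minimal
+ // sketch for the wrapper below, assuming `keys`, `grad_output` and `self`
+ // already exist:
+ //   at::Tensor grad_input = at::empty_like(self);
+ //   hardswish_backward_out(keys, grad_input, grad_output, self);   // out-first overload
+ //   hardswish_backward_outf(keys, grad_output, self, grad_input);  // schema-order overload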
+ inline at::Tensor & hardswish_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardswish_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rrelu_with_noise_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) { + return at::_ops::rrelu_with_noise_backward_out::redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result, out); + } + + // aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rrelu_with_noise_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) { + return at::_ops::rrelu_with_noise_backward_out::redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result, out); + } + + // aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_adaptive_avg_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::_adaptive_avg_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(output_size), out); + } + + // aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::_adaptive_avg_pool3d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _adaptive_avg_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool3d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _slow_conv2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask_out::redispatch(dispatchKeySet, grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2); + } + + // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _slow_conv2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_slow_conv2d_backward_output_mask_out::redispatch(dispatchKeySet, grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2); + } + + // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _slow_conv2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask_out::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2); + } + + // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::_slow_conv2d_backward_output_mask_out::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
+ }
+
+ // aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & conv_depthwise3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
+ return at::_ops::conv_depthwise3d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out);
+ }
+
+ // aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & conv_depthwise3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
+ return at::_ops::conv_depthwise3d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out);
+ }
+
+ // aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & conv_depthwise3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
+ return at::_ops::conv_depthwise3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+ }
+
+ // aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & conv_depthwise3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
+ return at::_ops::conv_depthwise3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+ }
+
+ // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
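+ // [Illustrative note, not part of the generated file] Ops declared with
+ // SymInt[] arguments get two wrapper families: the at::IntArrayRef overloads
+ // below widen each plain integer list via c10::fromIntArrayRefSlow() before
+ // redispatching, while the `*_symint_*` overloads accept c10::SymIntArrayRef
+ // and forward it unchanged. A hedged sketch, assuming `keys`, `self`,
+ // `weight` and a pre-allocated `out` already exist:
+ //   slow_conv_dilated2d_out(keys, out, self, weight, {3, 3});  // ints widened to SymInt
+ //   slow_conv_dilated2d_symint_out(keys, out, self, weight,
+ //                                  {c10::SymInt(3), c10::SymInt(3)});  // passed through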
+ inline at::Tensor & slow_conv_dilated2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); + } + + // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_dilated2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); + } + + // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_dilated2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out); + } + + // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_dilated2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out); + } + + // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_dilated3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated3d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); + } + + // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_dilated3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated3d_out::redispatch(dispatchKeySet, self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); + } + + // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_dilated3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out); + } + + // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_dilated3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out); + } + + // aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isinf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::isinf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isinf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::isinf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_exp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::linalg_matrix_exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_matrix_exp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::linalg_matrix_exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_optional_intlist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) { + return at::_ops::_test_optional_intlist_out::redispatch(dispatchKeySet, values, addends, out); + } + + // aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & _test_optional_intlist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) { + return at::_ops::_test_optional_intlist_out::redispatch(dispatchKeySet, values, addends, out); + } + + // aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_optional_filled_intlist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) { + return at::_ops::_test_optional_filled_intlist_out::redispatch(dispatchKeySet, values, addends, out); + } + + // aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_optional_filled_intlist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) { + return at::_ops::_test_optional_filled_intlist_out::redispatch(dispatchKeySet, values, addends, out); + } + + // aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_optional_floatlist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & values, c10::optional> addends) { + return at::_ops::_test_optional_floatlist_out::redispatch(dispatchKeySet, values, addends, out); + } + + // aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_optional_floatlist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, c10::optional> addends, at::Tensor & out) { + return at::_ops::_test_optional_floatlist_out::redispatch(dispatchKeySet, values, addends, out); + } + + // aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_warn_in_autograd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_test_warn_in_autograd_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_warn_in_autograd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_test_warn_in_autograd_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_autograd_multiple_dispatch_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_autograd_multiple_dispatch_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
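+ // [Illustrative note, not part of the generated file] Optional list arguments
+ // ("int[]?", "float[]?") surface here as at::OptionalIntArrayRef and
+ // c10::optional<at::ArrayRef<double>>; both accept c10::nullopt when the
+ // schema default of None is intended. A hedged sketch for the
+ // _test_optional_* wrappers above, assuming `keys`, `values` and a
+ // pre-allocated `out` already exist:
+ //   std::vector<int64_t> addends = {1, 2};
+ //   _test_optional_intlist_out(keys, out, values, addends);       // list provided
+ //   _test_optional_intlist_out(keys, out, values, c10::nullopt);  // list omitted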
+ inline at::Tensor & _test_autograd_multiple_dispatch_view_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_test_autograd_multiple_dispatch_view_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _test_autograd_multiple_dispatch_view_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_test_autograd_multiple_dispatch_view_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & segment_reduce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths={}, const c10::optional & indices={}, const c10::optional & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional & initial=c10::nullopt) { + return at::_ops::segment_reduce_out::redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial, out); + } + + // aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & segment_reduce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial, at::Tensor & out) { + return at::_ops::segment_reduce_out::redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial, out); + } + + // aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _segment_reduce_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths={}, const c10::optional & offsets={}, int64_t axis=0, const c10::optional & initial=c10::nullopt) { + return at::_ops::_segment_reduce_backward_out::redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial, out); + } + + // aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _segment_reduce_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial, at::Tensor & out) { + return at::_ops::_segment_reduce_backward_out::redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial, out); + } + + // aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _nested_tensor_from_tensor_list_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt) {
+ return at::_ops::_nested_tensor_from_tensor_list_out::redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory, out);
+ }
+
+ // aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _nested_tensor_from_tensor_list_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out) {
+ return at::_ops::_nested_tensor_from_tensor_list_out::redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory, out);
+ }
+
+ // aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _fw_primal_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t level) {
+ return at::_ops::_fw_primal_copy_out::redispatch(dispatchKeySet, self, level, out);
+ }
+
+ // aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _fw_primal_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, at::Tensor & out) {
+ return at::_ops::_fw_primal_copy_out::redispatch(dispatchKeySet, self, level, out);
+ }
+
+ // aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _make_dual_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
+ return at::_ops::_make_dual_copy_out::redispatch(dispatchKeySet, primal, tangent, level, out);
+ }
+
+ // aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _make_dual_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
+ return at::_ops::_make_dual_copy_out::redispatch(dispatchKeySet, primal, tangent, level, out);
+ }
+
+ // aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & view_as_real_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::view_as_real_copy_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & view_as_real_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::view_as_real_copy_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & view_as_complex_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::view_as_complex_copy_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & view_as_complex_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::view_as_complex_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _conj_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_conj_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _conj_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_conj_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _neg_view_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_neg_view_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _neg_view_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_neg_view_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & as_strided_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out); + } + + // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & as_strided_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset, at::Tensor & out) { + return at::_ops::as_strided_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out); + } + + // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & as_strided_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_copy_out::redispatch(dispatchKeySet, self, size, stride, storage_offset, out); + } + + // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & as_strided_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, at::Tensor & out) { + return at::_ops::as_strided_copy_out::redispatch(dispatchKeySet, self, size, stride, storage_offset, out); + } + + // aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!) 
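+ // [Illustrative note, not part of the generated file] For optional SymInt
+ // scalars such as storage_offset in the as_strided_copy wrappers above, the
+ // int64_t-based overloads wrap a present value into c10::optional<c10::SymInt>
+ // before redispatching (storage_offset.has_value() ?
+ // c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt), while the
+ // `*_symint_*` overloads forward the optional unchanged. A hedged sketch,
+ // assuming `keys`, `self`, `sizes`, `strides` and a pre-allocated `out` exist:
+ //   as_strided_copy_out(keys, out, self, sizes, strides, /*storage_offset=*/16);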
+ inline at::Tensor & _sparse_broadcast_to_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_sparse_broadcast_to_copy_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_broadcast_to_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_sparse_broadcast_to_copy_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diagonal_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) { + return at::_ops::diagonal_copy_out::redispatch(dispatchKeySet, self, offset, dim1, dim2, out); + } + + // aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diagonal_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { + return at::_ops::diagonal_copy_out::redispatch(dispatchKeySet, self, offset, dim1, dim2, out); + } + + // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & expand_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, bool implicit=false) { + return at::_ops::expand_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), implicit, out); + } + + // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & expand_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, bool implicit, at::Tensor & out) { + return at::_ops::expand_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), implicit, out); + } + + // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & expand_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) { + return at::_ops::expand_copy_out::redispatch(dispatchKeySet, self, size, implicit, out); + } + + // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & expand_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) { + return at::_ops::expand_copy_out::redispatch(dispatchKeySet, self, size, implicit, out); + } + + // aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & permute_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::permute_copy_out::redispatch(dispatchKeySet, self, dims, out); + } + + // aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & permute_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::permute_copy_out::redispatch(dispatchKeySet, self, dims, out); + } + + // aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _reshape_alias_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) { + return at::_ops::_reshape_alias_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _reshape_alias_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::_reshape_alias_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out); + } + + // aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _reshape_alias_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + return at::_ops::_reshape_alias_copy_out::redispatch(dispatchKeySet, self, size, stride, out); + } + + // aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _reshape_alias_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { + return at::_ops::_reshape_alias_copy_out::redispatch(dispatchKeySet, self, size, stride, out); + } + + // aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & select_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t index) { + return at::_ops::select_copy_int_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & select_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t index, at::Tensor & out) { + return at::_ops::select_copy_int_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & select_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt index) { + return at::_ops::select_copy_int_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & select_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) { + return at::_ops::select_copy_int_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
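+
+ // Illustrative aside, not part of the generated header: the non-symint wrappers bridge plain
+ // integer arguments onto the SymInt-based schemas. Array arguments go through
+ // c10::fromIntArrayRefSlow, optional integers are rewrapped as c10::make_optional(c10::SymInt(*v)),
+ // and scalar arguments such as select_copy's `index` convert implicitly, so both calls sketched
+ // below reach the same at::_ops::select_copy_int_out::redispatch entry point (`ks`, `t`, and
+ // `out` are hypothetical locals):
+ //
+ //   at::redispatch::select_copy_outf(ks, t, /*dim=*/0, /*index=*/1, out);
+ //   at::redispatch::select_copy_symint_outf(ks, t, /*dim=*/0, c10::SymInt(1), out);
+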
+ inline at::Tensor & detach_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::detach_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & detach_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::detach_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slice_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) { + return at::_ops::slice_copy_Tensor_out::redispatch(dispatchKeySet, self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out); + } + + // aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slice_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, int64_t step, at::Tensor & out) { + return at::_ops::slice_copy_Tensor_out::redispatch(dispatchKeySet, self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out); + } + + // aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slice_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) { + return at::_ops::slice_copy_Tensor_out::redispatch(dispatchKeySet, self, dim, start, end, step, out); + } + + // aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slice_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, at::Tensor & out) { + return at::_ops::slice_copy_Tensor_out::redispatch(dispatchKeySet, self, dim, start, end, step, out); + } + + // aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & squeeze_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::squeeze_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & squeeze_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::squeeze_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & squeeze_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim) { + return at::_ops::squeeze_copy_dim_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) 
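+
+ // Illustrative aside, not part of the generated header: slice_copy's `start` and `end` are
+ // optional, so the defaulted `_out` flavor behaves like a full Python-style slice, while the
+ // `_outf` flavor spells every argument in schema order. Sketch with hypothetical `ks`, `t`, `out`:
+ //
+ //   // copy of t[:], i.e. a full slice along dim 0:
+ //   at::redispatch::slice_copy_out(ks, out, t);
+ //   // copy of t[:, 1:4] using the schema-order flavor:
+ //   at::redispatch::slice_copy_outf(ks, t, /*dim=*/1, /*start=*/1, /*end=*/4, /*step=*/1, out);
+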
+ inline at::Tensor & squeeze_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::squeeze_copy_dim_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & squeeze_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::squeeze_copy_dims_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & squeeze_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { + return at::_ops::squeeze_copy_dims_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & t_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::t_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & t_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::t_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & transpose_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::transpose_copy_int_out::redispatch(dispatchKeySet, self, dim0, dim1, out); + } + + // aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & transpose_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) { + return at::_ops::transpose_copy_int_out::redispatch(dispatchKeySet, self, dim0, dim1, out); + } + + // aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & unsqueeze_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim) { + return at::_ops::unsqueeze_copy_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & unsqueeze_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::unsqueeze_copy_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
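+
+ // Illustrative aside, not part of the generated header: squeeze_copy_out is overloaded on the
+ // trailing `dim` argument, and each C++ overload redispatches to a different underlying op:
+ // no dim -> at::_ops::squeeze_copy_out, a single int64_t -> at::_ops::squeeze_copy_dim_out, and
+ // an at::IntArrayRef -> at::_ops::squeeze_copy_dims_out. With hypothetical `ks`, `t`, `out`:
+ //
+ //   at::redispatch::squeeze_copy_out(ks, out, t);          // drop every size-1 dimension
+ //   at::redispatch::squeeze_copy_out(ks, out, t, 0);       // drop dim 0 only
+ //   at::redispatch::squeeze_copy_out(ks, out, t, {0, 2});  // drop dims 0 and 2
+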
+ inline at::Tensor & _values_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_values_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _values_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_values_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & values_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::values_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & values_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::values_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & crow_indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::crow_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & crow_indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::crow_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & col_indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::col_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & col_indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::col_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ccol_indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::ccol_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ccol_indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::ccol_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & row_indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::row_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & row_indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::row_indices_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & view_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::view_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & view_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::view_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRefSlow(size), out); + } + + // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & view_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::view_copy_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & view_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::view_copy_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & view_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::ScalarType dtype) { + return at::_ops::view_copy_dtype_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & view_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::view_copy_dtype_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & unfold_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + return at::_ops::unfold_copy_out::redispatch(dispatchKeySet, self, dimension, size, step, out); + } + + // aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & unfold_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) { + return at::_ops::unfold_copy_out::redispatch(dispatchKeySet, self, dimension, size, step, out); + } + + // aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & alias_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::alias_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::alias_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & alias_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::alias_copy_out::redispatch(dispatchKeySet, self, out); + } + + // aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_padded_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) { + return at::_ops::to_padded_tensor_out::redispatch(dispatchKeySet, self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, out); + } + + // aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_padded_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size, at::Tensor & out) { + return at::_ops::to_padded_tensor_out::redispatch(dispatchKeySet, self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, out); + } + + // aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_padded_tensor_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt) { + return at::_ops::to_padded_tensor_out::redispatch(dispatchKeySet, self, padding, output_size, out); + } + + // aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_padded_tensor_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::to_padded_tensor_out::redispatch(dispatchKeySet, self, padding, output_size, out); + } + + // aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) 
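+
+ // Illustrative aside, not part of the generated header: as with the wrappers above, each
+ // overload simply forwards the explicit DispatchKeySet to at::_ops::<op>::redispatch, so the
+ // caller decides which dispatch keys remain in play. For optional array arguments such as
+ // to_padded_tensor's `output_size`, the int wrapper converts to SymInt form only when a value
+ // is present. Sketch with hypothetical `ks`, a nested tensor `nt`, and `out`:
+ //
+ //   // let the op infer the padded shape:
+ //   at::redispatch::to_padded_tensor_out(ks, out, nt, /*padding=*/0.0);
+ //   // pad to an explicit shape with the schema-order flavor:
+ //   at::redispatch::to_padded_tensor_outf(ks, nt, /*padding=*/0.0, at::IntArrayRef{4, 8}, out);
+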
+ inline at::Tensor & _transformer_encoder_layer_fwd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask={}, c10::optional mask_type=c10::nullopt) { + return at::_ops::_transformer_encoder_layer_fwd_out::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out); + } + + // aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _transformer_encoder_layer_fwd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type, at::Tensor & out) { + return at::_ops::_transformer_encoder_layer_fwd_out::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out); + } + + // aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _native_multi_head_attention_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional mask_type=c10::nullopt) { + return at::_ops::_native_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1); + } + + // aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _native_multi_head_attention_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask, bool need_weights, bool average_attn_weights, c10::optional mask_type, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_native_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1); + } + + // aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _triton_scaled_dot_attention_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0) { + return at::_ops::_triton_scaled_dot_attention_out::redispatch(dispatchKeySet, q, k, v, dropout_p, out); + } + + // aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _triton_scaled_dot_attention_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) { + return at::_ops::_triton_scaled_dot_attention_out::redispatch(dispatchKeySet, q, k, v, dropout_p, out); + } + + // aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!) 
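+
+ // Illustrative aside, not part of the generated header: out variants with several destinations,
+ // such as _native_multi_head_attention_out above, take one out tensor per output and return a
+ // tuple of references to those same tensors. Sketch assuming hypothetical query/key/value,
+ // projection weights and biases, and `ks`:
+ //
+ //   at::Tensor out0 = at::empty({0}), out1 = at::empty({0});
+ //   auto result = at::redispatch::_native_multi_head_attention_out(
+ //       ks, out0, out1, query, key, value, embed_dim, num_head,
+ //       qkv_weight, qkv_bias, proj_weight, proj_bias);
+ //   // std::get<0>(result) and std::get<1>(result) alias out0 and out1.
+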
+ inline at::Tensor & _triton_multi_head_attention_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask={}) { + return at::_ops::_triton_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out); + } + + // aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _triton_multi_head_attention_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask, at::Tensor & out) { + return at::_ops::_triton_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out); + } + + // aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _foobar_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) { + return at::_ops::_foobar_out::redispatch(dispatchKeySet, self, arg1, arg2, arg3, out); + } + + // aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _foobar_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) { + return at::_ops::_foobar_out::redispatch(dispatchKeySet, self, arg1, arg2, arg3, out); + } + + // aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adam_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adam_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + return at::_ops::_fused_adam_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + inline ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adam_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam_tensor_lr_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adam_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + return at::_ops::_fused_adam_tensor_lr_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + inline ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam_tensor_lr::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adamw_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adamw_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adamw_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + return at::_ops::_fused_adamw_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + inline ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adamw::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adamw_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adamw_tensor_lr_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adamw_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + return at::_ops::_fused_adamw_tensor_lr_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + inline ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adamw_tensor_lr::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_sgd_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_sgd_out::redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out); + } + + // aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_sgd_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + return at::_ops::_fused_sgd_out::redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out); + } + + // aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out) + inline ::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_sgd::redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + + // aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_sgd_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_sgd_tensor_lr_out::redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out); + } + + // aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_sgd_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + return at::_ops::_fused_sgd_tensor_lr_out::redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out); + } + + // aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out) + inline ::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_sgd_tensor_lr::redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } +} // namespace redispatch + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h new file mode 100644 index 0000000000000000000000000000000000000000..5bb358e0e537796aefe5fcfa6afe72de713e0e33 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h @@ -0,0 +1,3099 @@ +// This file contains all native_functions that can be registered to +// and the schema string that they should be registered with + +Tensor _cast_Byte(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Char(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Double(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Float(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Int(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Long(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Short(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Half(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +void _backward(const Tensor & self, TensorList inputs, const c10::optional & gradient, c10::optional retain_graph, bool create_graph); // {"schema": "aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()", "dispatch": "False", "default": "True"} +void set_data(Tensor & self, const Tensor & new_data); // {"schema": "aten::set_data(Tensor(a!) 
self, Tensor new_data) -> ()", "dispatch": "False", "default": "True"} +Tensor data(const Tensor & self); // {"schema": "aten::data(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +bool is_leaf(const Tensor & self); // {"schema": "aten::is_leaf(Tensor self) -> bool", "dispatch": "False", "default": "True"} +int64_t output_nr(const Tensor & self); // {"schema": "aten::output_nr(Tensor self) -> int", "dispatch": "False", "default": "True"} +int64_t _version(const Tensor & self); // {"schema": "aten::_version(Tensor self) -> int", "dispatch": "False", "default": "True"} +Tensor & requires_grad_(Tensor & self, bool requires_grad); // {"schema": "aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)", "dispatch": "False", "default": "True"} +void retain_grad(Tensor & self); // {"schema": "aten::retain_grad(Tensor(a!) self) -> ()", "dispatch": "False", "default": "True"} +bool retains_grad(const Tensor & self); // {"schema": "aten::retains_grad(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor _fw_primal(const Tensor & self, int64_t level); // {"schema": "aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor _make_dual(const Tensor & primal, const Tensor & tangent, int64_t level); // {"schema": "aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)", "dispatch": "True", "default": "True"} +::std::tuple _unpack_dual(const Tensor & dual, int64_t level); // {"schema": "aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)", "dispatch": "False", "default": "True"} +Tensor _new_zeros_with_same_feature_meta(const Tensor & self, const Tensor & other, int64_t self_num_batch_dims); // {"schema": "aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor", "dispatch": "True", "default": "True"} +bool _has_same_storage_numel(const Tensor & self, const Tensor & other); // {"schema": "aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "True"} +Tensor & rename_(Tensor & self, c10::optional names); // {"schema": "aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor rename(const Tensor & self, c10::optional names); // {"schema": "aten::rename(Tensor(a) self, Dimname[]? 
names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_to(const Tensor & self, DimnameList names); // {"schema": "aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_to(const Tensor & self, DimnameList order, int64_t ellipsis_idx); // {"schema": "aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_as(const Tensor & self, const Tensor & other); // {"schema": "aten::align_as(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector align_tensors(TensorList tensors); // {"schema": "aten::align_tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +void _assert_async(const Tensor & self); // {"schema": "aten::_assert_async(Tensor self) -> ()", "dispatch": "True", "default": "False"} +void _assert_async(const Tensor & self, c10::string_view assert_msg); // {"schema": "aten::_assert_async.msg(Tensor self, str assert_msg) -> ()", "dispatch": "True", "default": "False"} +void _assert_scalar(const Scalar & self, c10::string_view assert_msg); // {"schema": "aten::_assert_scalar(Scalar self, str assert_msg) -> ()", "dispatch": "True", "default": "True"} +Tensor _functional_assert_scalar(const Scalar & self, c10::string_view assert_msg, const Tensor & dep_token); // {"schema": "aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _functional_assert_async(const Tensor & self, c10::string_view assert_msg, const Tensor & dep_token); // {"schema": "aten::_functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor", "dispatch": "True", "default": "False"} +void _assert_tensor_metadata(const Tensor & a, OptionalSymIntArrayRef size, OptionalSymIntArrayRef stride, c10::optional dtype); // {"schema": "aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None) -> ()", "dispatch": "False", "default": "True"} +void _print(c10::string_view s); // {"schema": "aten::_print(str s) -> ()", "dispatch": "True", "default": "True"} +void sym_constrain_range(const Scalar & size, c10::optional min, c10::optional max); // {"schema": "aten::sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> ()", "dispatch": "True", "default": "True"} +void sym_constrain_range_for_size(const Scalar & size, c10::optional min, c10::optional max); // {"schema": "aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()", "dispatch": "True", "default": "True"} +Tensor _functional_sym_constrain_range(const Scalar & size, c10::optional min, c10::optional max, const Tensor & dep_token); // {"schema": "aten::_functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _functional_sym_constrain_range_for_size(const Scalar & size, c10::optional min, c10::optional max, const Tensor & dep_token); // {"schema": "aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _make_dep_token(c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor refine_names(const Tensor & self, DimnameList names); // {"schema": "aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +bool _use_cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank); // {"schema": "aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool", "dispatch": "True", "default": "False"} +bool _use_cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank); // {"schema": "aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool", "dispatch": "True", "default": "False"} +::std::tuple _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity); // {"schema": "aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity); // {"schema": "aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +bool _use_cudnn_rnn_flatten_weight(); // {"schema": "aten::_use_cudnn_rnn_flatten_weight() -> bool", "dispatch": "False", "default": "True"} +Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional); // {"schema": "aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state); // {"schema": "aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? 
dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask); // {"schema": "aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])", "dispatch": "True", "default": "False"} +Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "False"} +int64_t _debug_has_internal_overlap(const Tensor & self); // {"schema": "aten::_debug_has_internal_overlap(Tensor self) -> int", "dispatch": "False", "default": "True"} +::std::tuple _fused_dropout(const Tensor & self, double p, c10::optional generator); // {"schema": "aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale); // {"schema": "aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple native_dropout(const Tensor & input, double p, c10::optional train); // {"schema": "aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor native_dropout_backward(const Tensor & grad_output, const Tensor & mask, double scale); // {"schema": "aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional dtype); // {"schema": "aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated); // {"schema": "aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension); // {"schema": "aten::_sobol_engine_scramble_(Tensor(a!) 
+Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension); // {"schema": "aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape); // {"schema": "aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _shape_as_tensor(const Tensor & self); // {"schema": "aten::_shape_as_tensor(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor dropout(const Tensor & input, double p, bool train); // {"schema": "aten::dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & dropout_(Tensor & self, double p, bool train); // {"schema": "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor feature_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::feature_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & feature_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor alpha_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & alpha_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor feature_alpha_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor abs(const Tensor & self); // {"schema": "aten::abs(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & abs_(Tensor & self); // {"schema": "aten::abs_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & abs_out(const Tensor & self, Tensor & out); // {"schema": "aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor absolute(const Tensor & self); // {"schema": "aten::absolute(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & absolute_(Tensor & self); // {"schema": "aten::absolute_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & absolute_out(const Tensor & self, Tensor & out); // {"schema": "aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor angle(const Tensor & self); // {"schema": "aten::angle(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & angle_out(const Tensor & self, Tensor & out); // {"schema": "aten::angle.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor view_as_real(const Tensor & self); // {"schema": "aten::view_as_real(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor view_as_complex(const Tensor & self); // {"schema": "aten::view_as_complex(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor sgn(const Tensor & self); // {"schema": "aten::sgn(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sgn_(Tensor & self); // {"schema": "aten::sgn_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sgn_out(const Tensor & self, Tensor & out); // {"schema": "aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor chalf(const Tensor & self, c10::optional memory_format); // {"schema": "aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor real(const Tensor & self); // {"schema": "aten::real(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor imag(const Tensor & self); // {"schema": "aten::imag(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _conj(const Tensor & self); // {"schema": "aten::_conj(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor conj(const Tensor & self); // {"schema": "aten::conj(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _conj_physical(const Tensor & self); // {"schema": "aten::_conj_physical(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor conj_physical(const Tensor & self); // {"schema": "aten::conj_physical(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & conj_physical_out(const Tensor & self, Tensor & out); // {"schema": "aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & conj_physical_(Tensor & self); // {"schema": "aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resolve_conj(const Tensor & self); // {"schema": "aten::resolve_conj(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor resolve_neg(const Tensor & self); // {"schema": "aten::resolve_neg(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _neg_view(const Tensor & self); // {"schema": "aten::_neg_view(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor acos(const Tensor & self); // {"schema": "aten::acos(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & acos_(Tensor & self); // {"schema": "aten::acos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & acos_out(const Tensor & self, Tensor & out); // {"schema": "aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arccos(const Tensor & self); // {"schema": "aten::arccos(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arccos_(Tensor & self); // {"schema": "aten::arccos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arccos_out(const Tensor & self, Tensor & out); // {"schema": "aten::arccos.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad); // {"schema": "aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor add(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & add_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & add_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _add_relu(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _add_relu_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _add_relu_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _add_relu(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _add_relu_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor add(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & add_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmv_(Tensor(a!) 
self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addmv_out(const Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addr_out(const Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor affine_grid_generator(const Tensor & theta, c10::SymIntArrayRef size, bool align_corners); // {"schema": "aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor", "dispatch": "True", "default": "True"} +Tensor affine_grid_generator_backward(const Tensor & grad, c10::SymIntArrayRef size, bool align_corners); // {"schema": "aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _is_all_true(const Tensor & self); // {"schema": "aten::_is_all_true(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _is_any_true(const Tensor & self); // {"schema": "aten::_is_any_true(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_check_tensor(const Tensor & self); // {"schema": "aten::_test_check_tensor(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_functorch_fallback(const Tensor & self, const Tensor & other); // {"schema": "aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor all(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor all(const Tensor & self, OptionalIntArrayRef dim, bool keepdim); // {"schema": "aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & all_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out); // {"schema": "aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & all_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor all(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & all_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & out); // {"schema": "aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +bool allclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan); // {"schema": "aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool", "dispatch": "True", "default": "True"} +Tensor any(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor any(const Tensor & self, OptionalIntArrayRef dim, bool keepdim); // {"schema": "aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & any_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out); // {"schema": "aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & any_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor any(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & any_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & out); // {"schema": "aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor arange(const Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arange(const Scalar & start, const Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arange(const Scalar & start, const Scalar & end, const Scalar & step, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & arange_out(const Scalar & end, Tensor & out); // {"schema": "aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & arange_out(const Scalar & start, const Scalar & end, const Scalar & step, Tensor & out); // {"schema": "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _dim_arange(const Tensor & like, int64_t dim); // {"schema": "aten::_dim_arange(Tensor like, int dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argmax(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & argmax_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & out); // {"schema": "aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor argmin(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & argmin_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & out); // {"schema": "aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor acosh(const Tensor & self); // {"schema": "aten::acosh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & acosh_(Tensor & self); // {"schema": "aten::acosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & acosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arccosh(const Tensor & self); // {"schema": "aten::arccosh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arccosh_(Tensor & self); // {"schema": "aten::arccosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arccosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor asinh(const Tensor & self); // {"schema": "aten::asinh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & asinh_(Tensor & self); // {"schema": "aten::asinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & asinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arcsinh(const Tensor & self); // {"schema": "aten::arcsinh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arcsinh_(Tensor & self); // {"schema": "aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arcsinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atanh(const Tensor & self); // {"schema": "aten::atanh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atanh_(Tensor & self); // {"schema": "aten::atanh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & atanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arctanh(const Tensor & self); // {"schema": "aten::arctanh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctanh_(Tensor & self); // {"schema": "aten::arctanh_(Tensor(a!) 
self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor as_strided(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)", "dispatch": "True", "default": "False"} +const Tensor & as_strided_(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor asin(const Tensor & self); // {"schema": "aten::asin(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & asin_(Tensor & self); // {"schema": "aten::asin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & asin_out(const Tensor & self, Tensor & out); // {"schema": "aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arcsin(const Tensor & self); // {"schema": "aten::arcsin(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arcsin_(Tensor & self); // {"schema": "aten::arcsin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arcsin_out(const Tensor & self, Tensor & out); // {"schema": "aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atan(const Tensor & self); // {"schema": "aten::atan(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atan_(Tensor & self); // {"schema": "aten::atan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & atan_out(const Tensor & self, Tensor & out); // {"schema": "aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arctan(const Tensor & self); // {"schema": "aten::arctan(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctan_(Tensor & self); // {"schema": "aten::arctan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctan_out(const Tensor & self, Tensor & out); // {"schema": "aten::arctan.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atleast_1d(const Tensor & self); // {"schema": "aten::atleast_1d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_1d(TensorList tensors); // {"schema": "aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor atleast_2d(const Tensor & self); // {"schema": "aten::atleast_2d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_2d(TensorList tensors); // {"schema": "aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor atleast_3d(const Tensor & self); // {"schema": "aten::atleast_3d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_3d(TensorList tensors); // {"schema": "aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & baddbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bartlett_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bartlett_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor batch_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_batch_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & var, double eps, double output_scale, int64_t output_zero_point); // {"schema": "aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _batch_norm_impl_index(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)", "dispatch": "False", "default": "True"} +::std::tuple _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var_transform, bool train, double eps, ::std::array output_mask, const Tensor & reservedSpace); // {"schema": "aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor bernoulli(const Tensor & self, c10::optional generator); // {"schema": "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bernoulli_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bernoulli_(Tensor & self, const Tensor & p, c10::optional generator); // {"schema": "aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bernoulli_(Tensor & self, double p, c10::optional generator); // {"schema": "aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bernoulli(const Tensor & self, double p, c10::optional generator); // {"schema": "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const c10::optional & bias); // {"schema": "aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & binary_cross_entropy_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, Tensor & out); // {"schema": "aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & binary_cross_entropy_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, Tensor & grad_input); // {"schema": "aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bincount(const Tensor & self, const c10::optional & weights, int64_t minlength); // {"schema": "aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor bitwise_not(const Tensor & self); // {"schema": "aten::bitwise_not(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_not_(Tensor & self); // {"schema": "aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_not_out(const Tensor & self, Tensor & out); // {"schema": "aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & copysign_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor copysign(const Tensor & self, const Tensor & other); // {"schema": "aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copysign_(Tensor & self, const Tensor & other); // {"schema": "aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor copysign(const Tensor & self, const Scalar & other); // {"schema": "aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copysign_(Tensor & self, const Scalar & other); // {"schema": "aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & copysign_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _lazy_clone(const Tensor & self); // {"schema": "aten::_lazy_clone(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor logical_not(const Tensor & self); // {"schema": "aten::logical_not(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_not_(Tensor & self); // {"schema": "aten::logical_not_(Tensor(a!) 
self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_not_out(const Tensor & self, Tensor & out); // {"schema": "aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_xor(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_xor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_xor_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_xor_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_and(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_and(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_and_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_and_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_or(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_or(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_or_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_or_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor blackman_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor blackman_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bmm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::bmm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bmm_out(const Tensor & self, const Tensor & mat2, Tensor & out); // {"schema": "aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector broadcast_tensors(TensorList tensors); // {"schema": "aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor broadcast_to(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _sparse_broadcast_to(const Tensor & self, IntArrayRef size); // {"schema": "aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor cat(const ITensorListRef & tensors, int64_t dim); // {"schema": "aten::cat(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cat_out(const ITensorListRef & tensors, int64_t dim, Tensor & out); // {"schema": "aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cat(TensorList tensors, Dimname dim); // {"schema": "aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cat_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concat(TensorList tensors, int64_t dim); // {"schema": "aten::concat(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concat_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concat(TensorList tensors, Dimname dim); // {"schema": "aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concat_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concatenate(TensorList tensors, int64_t dim); // {"schema": "aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concatenate_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concatenate(TensorList tensors, Dimname dim); // {"schema": "aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concatenate_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor block_diag(TensorList tensors); // {"schema": "aten::block_diag(Tensor[] tensors) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ceil(const Tensor & self); // {"schema": "aten::ceil(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ceil_(Tensor & self); // {"schema": "aten::ceil_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ceil_out(const Tensor & self, Tensor & out); // {"schema": "aten::ceil.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor chain_matmul(TensorList matrices); // {"schema": "aten::chain_matmul(Tensor[] matrices) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & chain_matmul_out(TensorList matrices, Tensor & out); // {"schema": "aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::vector unsafe_chunk(const Tensor & self, int64_t chunks, int64_t dim); // {"schema": "aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector chunk(const Tensor & self, int64_t chunks, int64_t dim); // {"schema": "aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"} +::std::vector tensor_split(const Tensor & self, c10::SymInt sections, int64_t dim); // {"schema": "aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector tensor_split(const Tensor & self, c10::SymIntArrayRef indices, int64_t dim); // {"schema": "aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector tensor_split(const Tensor & self, const Tensor & tensor_indices_or_sections, int64_t dim); // {"schema": "aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +Tensor clamp(const Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor clamp(const Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & clamp_(Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_(Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_out(const Tensor & self, const c10::optional & min, const c10::optional & max, Tensor & out); // {"schema": "aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & clamp_out(const Tensor & self, const c10::optional & min, const c10::optional & max, Tensor & out); // {"schema": "aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor clamp_max(const Tensor & self, const Scalar & max); // {"schema": "aten::clamp_max(Tensor self, Scalar max) -> Tensor", "dispatch": "True", "default": "True"} +Tensor clamp_max(const Tensor & self, const Tensor & max); // {"schema": "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & clamp_max_(Tensor & self, const Scalar & max); // {"schema": "aten::clamp_max_(Tensor(a!) 
self, Scalar max) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_max_(Tensor & self, const Tensor & max); // {"schema": "aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_max_out(const Tensor & self, const Scalar & max, Tensor & out); // {"schema": "aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & clamp_max_out(const Tensor & self, const Tensor & max, Tensor & out); // {"schema": "aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor clamp_min(const Tensor & self, const Scalar & min); // {"schema": "aten::clamp_min(Tensor self, Scalar min) -> Tensor", "dispatch": "True", "default": "True"} +Tensor clamp_min(const Tensor & self, const Tensor & min); // {"schema": "aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & clamp_min_(Tensor & self, const Scalar & min); // {"schema": "aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_min_(Tensor & self, const Tensor & min); // {"schema": "aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_min_out(const Tensor & self, const Scalar & min, Tensor & out); // {"schema": "aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & clamp_min_out(const Tensor & self, const Tensor & min, Tensor & out); // {"schema": "aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor clip(const Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor clip(const Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & clip_(Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & clip_(Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & clip_out(const Tensor & self, const c10::optional & min, const c10::optional & max, Tensor & out); // {"schema": "aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & clip_out(const Tensor & self, const c10::optional & min, const c10::optional & max, Tensor & out); // {"schema": "aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +bool cudnn_is_acceptable(const Tensor & self); // {"schema": "aten::cudnn_is_acceptable(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor complex(const Tensor & real, const Tensor & imag); // {"schema": "aten::complex(Tensor real, Tensor imag) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & complex_out(const Tensor & real, const Tensor & imag, Tensor & out); // {"schema": "aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor polar(const Tensor & abs, const Tensor & angle); // {"schema": "aten::polar(Tensor abs, Tensor angle) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polar_out(const Tensor & abs, const Tensor & angle, Tensor & out); // {"schema": "aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor constant_pad_nd(const Tensor & self, c10::SymIntArrayRef pad, const Scalar & value); // {"schema": "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor contiguous(const Tensor & self, MemoryFormat memory_format); // {"schema": "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups); // {"schema": "aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward(const Tensor & grad_output, const Tensor & input, const Tensor & weight, OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor convolution_overrideable(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups); // {"schema": "aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", "dispatch": "True", "default": "True"} +Tensor _convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32); // {"schema": "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled); // {"schema": "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _convolution_mode(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _convolution_double_backward(const c10::optional & ggI, const c10::optional & ggW, const c10::optional & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor conv1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding=\"valid\", SymInt[1] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, str padding=\"valid\", SymInt[2] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding=\"valid\", SymInt[3] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad); // {"schema": "aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad); // {"schema": "aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation); // {"schema": "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_transpose2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation); // {"schema": "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_transpose3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation); // {"schema": "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor copy(const Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _copy_from(const Tensor & self, const Tensor & dst, bool non_blocking); // {"schema": "aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _copy_from_and_resize(const Tensor & self, const Tensor & dst); // {"schema": "aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cos(const Tensor & self); // {"schema": "aten::cos(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cos_(Tensor & self); // {"schema": "aten::cos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cos_out(const Tensor & self, Tensor & out); // {"schema": "aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cosh(const Tensor & self); // {"schema": "aten::cosh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cosh_(Tensor & self); // {"schema": "aten::cosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor count_nonzero(const Tensor & self, IntArrayRef dim); // {"schema": "aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor count_nonzero(const Tensor & self, c10::optional dim); // {"schema": "aten::count_nonzero(Tensor self, int? dim=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor cov(const Tensor & self, int64_t correction, const c10::optional & fweights, const c10::optional & aweights); // {"schema": "aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? 
aweights=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor corrcoef(const Tensor & self); // {"schema": "aten::corrcoef(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W); // {"schema": "aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid", "dispatch": "True", "default": "False"} +Tensor cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W); // {"schema": "aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta", "dispatch": "True", "default": "False"} +::std::tuple cudnn_batch_norm(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon); // {"schema": "aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const Tensor & reserveSpace); // {"schema": "aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32); // {"schema": "aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & cudnn_convolution_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, Tensor & out); // {"schema": "aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32); // {"schema": "aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _mps_convolution_transpose(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mps_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_relu(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_add_relu(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cudnn_grid_sampler(const Tensor & self, const Tensor & grid); // {"schema": "aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output", "dispatch": "True", "default": "False"} +::std::tuple cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output); // {"schema": "aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)", "dispatch": "True", "default": "False"} +::std::tuple cummax(const Tensor & self, int64_t dim); // {"schema": "aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple cummax_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", "dispatch": "True", "default": "True"} +::std::tuple cummax(const Tensor & self, Dimname dim); // {"schema": "aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple cummax_out(const Tensor & self, Dimname dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +void _cummax_helper(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim); // {"schema": "aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()", "dispatch": "True", "default": "False"} +::std::tuple cummin(const Tensor & self, int64_t dim); // {"schema": "aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple cummin_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple cummin(const Tensor & self, Dimname dim); // {"schema": "aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple cummin_out(const Tensor & self, Dimname dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +void _cummin_helper(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim); // {"schema": "aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()", "dispatch": "True", "default": "False"} +Tensor cummaxmin_backward(const Tensor & grad, const Tensor & input, const Tensor & indices, int64_t dim); // {"schema": "aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumprod(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cumprod_(Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cumprod_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cumprod(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cumprod_(Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cumprod_out(const Tensor & self, Dimname dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cumprod_backward(const Tensor & grad, const Tensor & input, int64_t dim, const Tensor & output); // {"schema": "aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumsum(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cumsum_(Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cumsum_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cumsum(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cumsum_(Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cumsum_out(const Tensor & self, Dimname dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cumulative_trapezoid(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumulative_trapezoid(const Tensor & y, const Scalar & dx, int64_t dim); // {"schema": "aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity); // {"schema": "aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity); // {"schema": "aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, 
bool zero_infinity=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor diag_embed(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diagflat(const Tensor & self, int64_t offset); // {"schema": "aten::diagflat(Tensor self, int offset=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor diagonal(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor linalg_diagonal(const Tensor & A, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor diagonal(const Tensor & self, Dimname outdim, Dimname dim1, Dimname dim2, int64_t offset); // {"schema": "aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor diagonal_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fill_diagonal_(Tensor & self, const Scalar & fill_value, bool wrap); // {"schema": "aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor diff(const Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append); // {"schema": "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & diff_out(const Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append, Tensor & out); // {"schema": "aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, const c10::optional & spacing, c10::optional dim, int64_t edge_order); // {"schema": "aten::gradient.scalarint(Tensor self, *, Scalar? 
spacing=None, int? dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, const Scalar & spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, ArrayRef spacing, c10::optional dim, int64_t edge_order); // {"schema": "aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, ArrayRef spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, TensorList spacing, c10::optional dim, int64_t edge_order); // {"schema": "aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, TensorList spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor div(const Tensor & self, const Tensor & other); // {"schema": "aten::div.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & div_(Tensor & self, const Tensor & other); // {"schema": "aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor div(const Tensor & self, const Tensor & other, c10::optional rounding_mode); // {"schema": "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & div_(Tensor & self, const Tensor & other, c10::optional rounding_mode); // {"schema": "aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Tensor & other, c10::optional rounding_mode, Tensor & out); // {"schema": "aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor div(const Tensor & self, const Scalar & other); // {"schema": "aten::div.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & div_(Tensor & self, const Scalar & other); // {"schema": "aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor div(const Tensor & self, const Scalar & other, c10::optional rounding_mode); // {"schema": "aten::div.Scalar_mode(Tensor self, Scalar other, *, str? 
rounding_mode) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & div_(Tensor & self, const Scalar & other, c10::optional rounding_mode); // {"schema": "aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor divide(const Tensor & self, const Tensor & other); // {"schema": "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & divide_(Tensor & self, const Tensor & other); // {"schema": "aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor divide(const Tensor & self, const Scalar & other); // {"schema": "aten::divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & divide_(Tensor & self, const Scalar & other); // {"schema": "aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor divide(const Tensor & self, const Tensor & other, c10::optional rounding_mode); // {"schema": "aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & divide_(Tensor & self, const Tensor & other, c10::optional rounding_mode); // {"schema": "aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & divide_out(const Tensor & self, const Tensor & other, c10::optional rounding_mode, Tensor & out); // {"schema": "aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor divide(const Tensor & self, const Scalar & other, c10::optional rounding_mode); // {"schema": "aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & divide_(Tensor & self, const Scalar & other, c10::optional rounding_mode); // {"schema": "aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor true_divide(const Tensor & self, const Tensor & other); // {"schema": "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & true_divide_(Tensor & self, const Tensor & other); // {"schema": "aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & true_divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor true_divide(const Tensor & self, const Scalar & other); // {"schema": "aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & true_divide_(Tensor & self, const Scalar & other); // {"schema": "aten::true_divide_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor dot(const Tensor & self, const Tensor & tensor); // {"schema": "aten::dot(Tensor self, Tensor tensor) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & dot_out(const Tensor & self, const Tensor & tensor, Tensor & out); // {"schema": "aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor vdot(const Tensor & self, const Tensor & other); // {"schema": "aten::vdot(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & vdot_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor einsum(c10::string_view equation, TensorList tensors, OptionalIntArrayRef path); // {"schema": "aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor embedding(const Tensor & weight, const Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse); // {"schema": "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor embedding_backward(const Tensor & grad, const Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse); // {"schema": "aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor", "dispatch": "False", "default": "True"} +Tensor embedding_dense_backward(const Tensor & grad_output, const Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq); // {"schema": "aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type); // {"schema": "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); // {"schema": "aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _embedding_bag_forward_only(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx); // {"schema": "aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
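+// Example (usage sketch; assumes the trailing contraction-path argument is defaulted in the
+// public C++ API, so the two-argument call compiles): einsum takes the equation string plus a
+// TensorList, so a braced list of tensors works directly:
+//
+//   at::Tensor A = at::randn({2, 3});
+//   at::Tensor B = at::randn({3, 4});
+//   at::Tensor C = at::einsum("ij,jk->ik", {A, B});   // same result as at::matmul(A, B)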
per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _rowwise_prune(const Tensor & weight, const Tensor & mask, ScalarType compressed_indices_dtype); // {"schema": "aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor row_stack(TensorList tensors); // {"schema": "aten::row_stack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & row_stack_out(TensorList tensors, Tensor & out); // {"schema": "aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset); // {"schema": "aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, c10::optional padding_idx); // {"schema": "aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx); // {"schema": "aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _embedding_bag_per_sample_weights_backward(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode, int64_t padding_idx); // {"schema": "aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor empty(IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor empty(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor empty_permuted(c10::SymIntArrayRef size, IntArrayRef physical_layout, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_empty(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_empty_strided(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_full(const Tensor & self, c10::SymIntArrayRef size, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_zeros(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_ones(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _empty_affine_quantized(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, double scale, int64_t zero_point, c10::optional memory_format); // {"schema": "aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _empty_per_channel_affine_quantized(c10::SymIntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor", "dispatch": "True", "default": "False"} +const Tensor & resize_(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format); // {"schema": "aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +const Tensor & _resize_output_(const Tensor & self, c10::SymIntArrayRef size, Device device); // {"schema": "aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor empty_quantized(IntArrayRef size, const Tensor & qtensor, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & empty_out(c10::SymIntArrayRef size, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor empty_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor erf(const Tensor & self); // {"schema": "aten::erf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erf_(Tensor & self); // {"schema": "aten::erf_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erf_out(const Tensor & self, Tensor & out); // {"schema": "aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor erfc(const Tensor & self); // {"schema": "aten::erfc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erfc_(Tensor & self); // {"schema": "aten::erfc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erfc_out(const Tensor & self, Tensor & out); // {"schema": "aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor exp(const Tensor & self); // {"schema": "aten::exp(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & exp_(Tensor & self); // {"schema": "aten::exp_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & exp_out(const Tensor & self, Tensor & out); // {"schema": "aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor exp2(const Tensor & self); // {"schema": "aten::exp2(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & exp2_(Tensor & self); // {"schema": "aten::exp2_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & exp2_out(const Tensor & self, Tensor & out); // {"schema": "aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor expm1(const Tensor & self); // {"schema": "aten::expm1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & expm1_(Tensor & self); // {"schema": "aten::expm1_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & expm1_out(const Tensor & self, Tensor & out); // {"schema": "aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor expand(const Tensor & self, c10::SymIntArrayRef size, bool implicit); // {"schema": "aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor expand_as(const Tensor & self, const Tensor & other); // {"schema": "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor eye(c10::SymInt n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor eye(c10::SymInt n, c10::SymInt m, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & eye_out(c10::SymInt n, Tensor & out); // {"schema": "aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & eye_out(c10::SymInt n, c10::SymInt m, Tensor & out); // {"schema": "aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim); // {"schema": "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim, Dimname out_dim); // {"schema": "aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, Dimname start_dim, Dimname end_dim, Dimname out_dim); // {"schema": "aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, DimnameList dims, Dimname out_dim); // {"schema": "aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor unflatten(const Tensor & self, int64_t dim, c10::SymIntArrayRef sizes); // {"schema": "aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor unflatten(const Tensor & self, Dimname dim, c10::SymIntArrayRef sizes, DimnameList names); // {"schema": "aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor fill(const Tensor & self, const Scalar & value); // {"schema": "aten::fill.Scalar(Tensor self, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor fill(const Tensor & self, const Tensor & value); // {"schema": "aten::fill.Tensor(Tensor self, Tensor value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fill_(Tensor & self, const Scalar & value); // {"schema": "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & fill_(Tensor & self, const Tensor & value); // {"schema": "aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor(const Tensor & self); // {"schema": "aten::floor(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & floor_(Tensor & self); // {"schema": "aten::floor_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & floor_out(const Tensor & self, Tensor & out); // {"schema": "aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor_divide(const Tensor & self, const Tensor & other); // {"schema": "aten::floor_divide(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & floor_divide_(Tensor & self, const Tensor & other); // {"schema": "aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & floor_divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor_divide(const Tensor & self, const Scalar & other); // {"schema": "aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & floor_divide_(Tensor & self, const Scalar & other); // {"schema": "aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor frac(const Tensor & self); // {"schema": "aten::frac(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & frac_(Tensor & self); // {"schema": "aten::frac_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & frac_out(const Tensor & self, Tensor & out); // {"schema": "aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor full(IntArrayRef size, const Scalar & fill_value, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor full(c10::SymIntArrayRef size, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & full_out(c10::SymIntArrayRef size, const Scalar & fill_value, Tensor & out); // {"schema": "aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor full_like(const Tensor & self, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor from_file(c10::string_view filename, c10::optional shared, c10::optional size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & gcd_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gcd(const Tensor & self, const Tensor & other); // {"schema": "aten::gcd(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gcd_(Tensor & self, const Tensor & other); // {"schema": "aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lcm_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lcm(const Tensor & self, const Tensor & other); // {"schema": "aten::lcm(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lcm_(Tensor & self, const Tensor & other); // {"schema": "aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "False", "default": "True"} +Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple grid_sampler_2d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); // {"schema": "aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _grid_sampler_2d_cpu_fallback(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple _grid_sampler_2d_cpu_fallback_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor grid_sampler_3d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple grid_sampler_3d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); // {"schema": "aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor hann_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hann_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor group_norm(const Tensor & input, int64_t num_groups, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enabled); // {"schema": "aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple native_group_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps); // {"schema": "aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask); // {"schema": "aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _fft_r2c(const Tensor & self, IntArrayRef dim, int64_t normalization, bool onesided); // {"schema": "aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_r2c_out(const Tensor & self, IntArrayRef dim, int64_t normalization, bool onesided, Tensor & out); // {"schema": "aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _fft_c2r(const Tensor & self, IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size); // {"schema": "aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_c2r_out(const Tensor & self, IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, Tensor & out); // {"schema": "aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _fft_c2c(const Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward); // {"schema": "aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_c2c_out(const Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, Tensor & out); // {"schema": "aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +void _validate_compressed_sparse_indices(bool is_crow, const Tensor & compressed_idx, const Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz); // {"schema": "aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()", "dispatch": "True", "default": "False"} +int64_t _cufft_get_plan_cache_size(DeviceIndex device_index); // {"schema": "aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int", "dispatch": "False", "default": "True"} +int64_t _cufft_get_plan_cache_max_size(DeviceIndex device_index); // {"schema": "aten::_cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int", "dispatch": "False", "default": "True"} +void _cufft_set_plan_cache_max_size(DeviceIndex device_index, int64_t max_size); // {"schema": "aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> ()", "dispatch": "False", "default": "True"} +void _cufft_clear_plan_cache(DeviceIndex device_index); // {"schema": "aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> ()", "dispatch": "False", "default": "True"} +Tensor index(const Tensor & self, const c10::List> & indices); // {"schema": "aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_out(const Tensor & self, const c10::List> & indices, Tensor & out); // {"schema": "aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _unsafe_index(const Tensor & self, const c10::List> & indices); // {"schema": "aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_copy_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, Tensor & out); // {"schema": "aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_copy(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_copy_(Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor index_copy(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & index_put_(Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate); // {"schema": "aten::index_put_(Tensor(a!) 
self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_put(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate); // {"schema": "aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _unsafe_index_put(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate); // {"schema": "aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _index_put_impl_(Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, bool unsafe); // {"schema": "aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor instance_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"} +Tensor isclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan); // {"schema": "aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & isin_out(const Tensor & elements, const Tensor & test_elements, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isin(const Tensor & elements, const Tensor & test_elements, bool assume_unique, bool invert); // {"schema": "aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isin_out(const Tensor & elements, const Scalar & test_element, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isin(const Tensor & elements, const Scalar & test_element, bool assume_unique, bool invert); // {"schema": "aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isin_out(const Scalar & element, const Tensor & test_elements, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isin(const Scalar & element, const Tensor & test_elements, bool assume_unique, bool invert); // {"schema": "aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor isnan(const Tensor & self); // {"schema": "aten::isnan(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +bool is_distributed(const Tensor & self); // {"schema": "aten::is_distributed(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_floating_point(const Tensor & self); // {"schema": "aten::is_floating_point(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_complex(const Tensor & self); // {"schema": "aten::is_complex(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_conj(const Tensor & self); // {"schema": "aten::is_conj(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool _is_zerotensor(const Tensor & self); // {"schema": "aten::_is_zerotensor(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_neg(const Tensor & self); // {"schema": "aten::is_neg(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor isreal(const Tensor & self); // {"schema": "aten::isreal(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +bool is_nonzero(const Tensor & self); // {"schema": "aten::is_nonzero(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_same_size(const Tensor & self, const Tensor & other); // {"schema": "aten::is_same_size(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "True"} +bool is_signed(const Tensor & self); // {"schema": "aten::is_signed(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_inference(const Tensor & self); // {"schema": "aten::is_inference(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor kl_div(const Tensor & self, const Tensor & target, int64_t reduction, bool log_target); // {"schema": "aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor kron(const Tensor & self, const Tensor & other); // {"schema": "aten::kron(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & kron_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim); // {"schema": "aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple kthvalue_out(const Tensor & self, int64_t k, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", "dispatch": "True", "default": "False"} +::std::tuple kthvalue(const Tensor & self, int64_t k, Dimname dim, bool keepdim); // {"schema": "aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple kthvalue_out(const Tensor & self, int64_t k, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor layer_norm(const Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enable); // {"schema": "aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple native_layer_norm(const Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); // {"schema": "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_backward(const Tensor & grad_out, const Tensor & input, c10::SymIntArrayRef normalized_shape, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask); // {"schema": "aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor nan_to_num(const Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf); // {"schema": "aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nan_to_num_(Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf); // {"schema": "aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nan_to_num_out(const Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf, Tensor & out); // {"schema": "aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linear(const Tensor & input, const Tensor & weight, const c10::optional & bias); // {"schema": "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple linear_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask); // {"schema": "aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & linear_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, Tensor & out); // {"schema": "aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor mkldnn_linear(const Tensor & self, const Tensor & weight, const c10::optional & bias); // {"schema": "aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_linear_backward_input(IntArrayRef input_size, const Tensor & grad_output, const Tensor & weight); // {"schema": "aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mkldnn_linear_backward_weights(const Tensor & grad_output, const Tensor & input, const Tensor & weight, bool bias_defined); // {"schema": "aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple mkldnn_linear_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask); // {"schema": "aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _cslt_compress(const Tensor & input); // {"schema": "aten::_cslt_compress(Tensor input) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _cslt_sparse_mm(const Tensor & compressed_A, const Tensor & dense_B, const c10::optional & bias, const c10::optional & alpha, c10::optional out_dtype, bool transpose_result, int64_t alg_id); // {"schema": "aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor", "dispatch": "True", "default": "False"} +int64_t _cslt_sparse_mm_search(const Tensor & compressed_A, const Tensor & dense_B, const c10::optional & bias, const c10::optional & alpha, c10::optional out_dtype, bool transpose_result); // {"schema": "aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int", "dispatch": "True", "default": "False"} +Tensor _sparse_semi_structured_linear(const Tensor & input, const Tensor & weight, const Tensor & meta, const c10::optional & bias, c10::optional activation, c10::optional out_dtype); // {"schema": "aten::_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _mixed_dtypes_linear(const Tensor & input, const Tensor & weight, const Tensor & scale, const c10::optional & bias, c10::optional activation); // {"schema": "aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? 
activation=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor fbgemm_linear_int8_weight_fp32_activation(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, const Scalar & weight_scale, const Scalar & weight_zero_point, const Tensor & bias); // {"schema": "aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_int8_weight(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, const Scalar & weight_scale, const Scalar & weight_zero_point, const Tensor & bias); // {"schema": "aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fbgemm_linear_quantize_weight(const Tensor & input); // {"schema": "aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_gemm_matrix_fp16(const Tensor & input); // {"schema": "aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_fp16_weight_fp32_activation(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); // {"schema": "aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_fp16_weight(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); // {"schema": "aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_quantized_matrix(const Tensor & input); // {"schema": "aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_quantized_matrix(const Tensor & input, int64_t K, int64_t N); // {"schema": "aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ldexp(const Tensor & self, const Tensor & other); // {"schema": "aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & ldexp_(Tensor & self, const Tensor & other); // {"schema": "aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & ldexp_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linspace(const Scalar & start, const Scalar & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor linspace(const Tensor & start, const Tensor & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor linspace(const Tensor & start, const Scalar & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor linspace(const Scalar & start, const Tensor & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linspace_out(const Scalar & start, const Scalar & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & linspace_out(const Tensor & start, const Tensor & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & linspace_out(const Tensor & start, const Scalar & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & linspace_out(const Scalar & start, const Tensor & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor log(const Tensor & self); // {"schema": "aten::log(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log_(Tensor & self); // {"schema": "aten::log_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log_out(const Tensor & self, Tensor & out); // {"schema": "aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log10(const Tensor & self); // {"schema": "aten::log10(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log10_(Tensor & self); // {"schema": "aten::log10_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log10_out(const Tensor & self, Tensor & out); // {"schema": "aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log1p(const Tensor & self); // {"schema": "aten::log1p(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log1p_(Tensor & self); // {"schema": "aten::log1p_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log1p_out(const Tensor & self, Tensor & out); // {"schema": "aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log2(const Tensor & self); // {"schema": "aten::log2(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log2_(Tensor & self); // {"schema": "aten::log2_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log2_out(const Tensor & self, Tensor & out); // {"schema": "aten::log2.out(Tensor self, *, Tensor(a!) 
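+// NOTE: usage sketch (illustrative comment, not part of the generated
+// declarations). Assuming the public ATen C++ API from <torch/torch.h>,
+// the factory and unary log entries above are called like this:
+//
+//   #include <torch/torch.h>
+//   torch::Tensor x = torch::linspace(0.0, 1.0, /*steps=*/5);  // aten::linspace
+//   torch::Tensor a = torch::log(x + 1.0);                     // aten::log
+//   torch::Tensor b = torch::log1p(x);                         // aten::log1p
+//
+// log1p evaluates log(1 + x) directly, so it stays accurate when x is much
+// smaller than 1, where forming x + 1.0 first would lose precision.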
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & logaddexp_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logaddexp(const Tensor & self, const Tensor & other); // {"schema": "aten::logaddexp(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logaddexp2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logaddexp2(const Tensor & self, const Tensor & other); // {"schema": "aten::logaddexp2(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Tensor & self, const Tensor & other); // {"schema": "aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Scalar & self, const Tensor & other); // {"schema": "aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Tensor & self, const Scalar & other); // {"schema": "aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & xlogy_(Tensor & self, const Tensor & other); // {"schema": "aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_(Tensor & self, const Scalar & other); // {"schema": "aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & xlogy_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logspace(const Scalar & start, const Scalar & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor logspace(const Tensor & start, const Tensor & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor logspace(const Tensor & start, const Scalar & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? 
+Tensor logspace(const Tensor & start, const Scalar & end, int64_t steps, double base, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor logspace(const Scalar & start, const Tensor & end, int64_t steps, double base, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & logspace_out(const Scalar & start, const Scalar & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & logspace_out(const Tensor & start, const Tensor & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & logspace_out(const Tensor & start, const Scalar & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & logspace_out(const Scalar & start, const Tensor & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor log_softmax(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype); // {"schema": "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & log_softmax_out(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor log_softmax(const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype); // {"schema": "aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _log_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & _log_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype); // {"schema": "aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor", "dispatch": "True", "default": "True"}
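+// NOTE: usage sketch (illustrative comment, not part of the generated
+// declarations). Assuming the ATen C++ API from <torch/torch.h>, the
+// log_softmax entries above are used as:
+//
+//   torch::Tensor logits = torch::randn({4, 10});
+//   torch::Tensor logp   = torch::log_softmax(logits, /*dim=*/1);   // aten::log_softmax.int
+//   // The optional dtype upcasts the reduction, e.g. for half-precision inputs:
+//   torch::Tensor logp32 = torch::log_softmax(logits.to(torch::kHalf), 1, torch::kFloat);
+//
+// The underscore-prefixed _log_softmax / _log_softmax_backward_data entries are
+// the lower-level kernels that the composite log_softmax call is expected to
+// decompose into.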
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _logcumsumexp(const Tensor & self, int64_t dim); // {"schema": "aten::_logcumsumexp(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _logcumsumexp_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logcumsumexp(const Tensor & self, int64_t dim); // {"schema": "aten::logcumsumexp(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logcumsumexp_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logcumsumexp(const Tensor & self, Dimname dim); // {"schema": "aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & logcumsumexp_out(const Tensor & self, Dimname dim, Tensor & out); // {"schema": "aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logsumexp_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logsumexp(const Tensor & self, DimnameList dim, bool keepdim); // {"schema": "aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & logsumexp_out(const Tensor & self, DimnameList dim, bool keepdim, Tensor & out); // {"schema": "aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor margin_ranking_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple matmul_backward(const Tensor & grad, const Tensor & self, const Tensor & other, ::std::array mask); // {"schema": "aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor matrix_power(const Tensor & self, int64_t n); // {"schema": "aten::matrix_power(Tensor self, int n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & matrix_power_out(const Tensor & self, int64_t n, Tensor & out); // {"schema": "aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor matrix_exp(const Tensor & self); // {"schema": "aten::matrix_exp(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor matrix_exp_backward(const Tensor & self, const Tensor & grad); // {"schema": "aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _aminmax(const Tensor & self); // {"schema": "aten::_aminmax(Tensor self) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _aminmax(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple aminmax(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)", "dispatch": "True", "default": "True"} +::std::tuple aminmax_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & min, Tensor & max); // {"schema": "aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)", "dispatch": "True", "default": "False"} +Tensor _compute_linear_combination(const Tensor & input, const Tensor & coefficients); // {"schema": "aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _compute_linear_combination_out(const Tensor & input, const Tensor & coefficients, Tensor & out); // {"schema": "aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple max(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple max_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & max, Tensor & max_values); // {"schema": "aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple max(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple max_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & max, Tensor & max_values); // {"schema": "aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor value_selecting_reduction_backward(const Tensor & grad, int64_t dim, const Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim); // {"schema": "aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor amax(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & amax_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple max_pool1d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool2d_backward(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool3d_backward(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantized_max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantized_max_pool2d(const Tensor & 
self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantized_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor mean(const Tensor & self, c10::optional dtype); // {"schema": "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor mean(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mean_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mean(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype); // {"schema": "aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & mean_out(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanmean(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanmean_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor median(const Tensor & self); // {"schema": "aten::median(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple median(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple median_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", "dispatch": "True", "default": "False"} +::std::tuple median(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple median_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor nanmedian(const Tensor & self); // {"schema": "aten::nanmedian(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple nanmedian(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple nanmedian_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple nanmedian(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple nanmedian_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple min(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple min_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & min, Tensor & min_indices); // {"schema": "aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple min(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple min_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & min, Tensor & min_indices); // {"schema": "aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor amin(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & amin_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _mps_convolution(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mps_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor mkldnn_convolution(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_rnn_layer(const Tensor & input, const Tensor & weight0, const Tensor & weight1, const Tensor & weight2, const Tensor & weight3, const Tensor & hx_, const Tensor & cx_, bool reverse, IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train); // {"schema": "aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple mkldnn_rnn_layer_backward(const Tensor & input, const Tensor & weight1, const Tensor & weight2, const Tensor & weight3, const Tensor & weight4, const Tensor & hx_, const Tensor & cx_tmp, const Tensor & output, const Tensor & hy_, const Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, IntArrayRef batch_sizes, bool batch_first, const Tensor & workspace); // {"schema": "aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple miopen_batch_norm(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon); // {"schema": "aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple miopen_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon); // {"schema": "aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor miopen_convolution(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_transpose(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_depthwise_convolution(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_relu(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_add_relu(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple miopen_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state); // {"schema": "aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple> miopen_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask); // {"schema": "aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])", "dispatch": "True", "default": "False"} +Tensor mm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::mm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mm_out(const Tensor & self, const Tensor & mat2, Tensor & out); // {"schema": "aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _int_mm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::_int_mm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _int_mm_out(const Tensor & self, const Tensor & mat2, Tensor & out); // {"schema": "aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _convert_weight_to_int4pack(const Tensor & self, int64_t innerKTiles); // {"schema": "aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _weight_int4pack_mm(const Tensor & self, const Tensor & mat2, int64_t qGroupSize, const Tensor & qScaleAndZeros); // {"schema": "aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _weight_int8pack_mm(const Tensor & self, const Tensor & mat2, const Tensor & scales); // {"schema": "aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense); // {"schema": "aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense, c10::string_view reduce); // {"schema": "aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sparse_matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mode(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "False"} +::std::tuple mode_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple mode(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple mode_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor mul(const Tensor & self, const Tensor & other); // {"schema": "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mul_(Tensor & self, const Tensor & other); // {"schema": "aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mul(const Tensor & self, const Scalar & other); // {"schema": "aten::mul.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mul_(Tensor & self, const Scalar & other); // {"schema": "aten::mul_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor multiply(const Tensor & self, const Tensor & other); // {"schema": "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multiply_(Tensor & self, const Tensor & other); // {"schema": "aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & multiply_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor multiply(const Tensor & self, const Scalar & other); // {"schema": "aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multiply_(Tensor & self, const Scalar & other); // {"schema": "aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor mv(const Tensor & self, const Tensor & vec); // {"schema": "aten::mv(Tensor self, Tensor vec) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mv_out(const Tensor & self, const Tensor & vec, Tensor & out); // {"schema": "aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mvlgamma_out(const Tensor & self, int64_t p, Tensor & out); // {"schema": "aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mvlgamma(const Tensor & self, int64_t p); // {"schema": "aten::mvlgamma(Tensor self, int p) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mvlgamma_(Tensor & self, int64_t p); // {"schema": "aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor narrow_copy(const Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); // {"schema": "aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & narrow_copy_out(const Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, Tensor & out); // {"schema": "aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor narrow(const Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); // {"schema": "aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor narrow(const Tensor & self, int64_t dim, const Tensor & start, c10::SymInt length); // {"schema": "aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)", "dispatch": "False", "default": "True"} +::std::tuple native_batch_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps); // {"schema": "aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple native_batch_norm_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, Tensor & out, Tensor & save_mean, Tensor & save_invstd); // {"schema": "aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "False"} +::std::tuple _native_batch_norm_legit(const Tensor & input, const c10::optional & weight, const c10::optional & bias, Tensor & running_mean, Tensor & running_var, bool training, double momentum, double eps); // {"schema": "aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _native_batch_norm_legit_no_training(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps); // {"schema": "aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +::std::tuple _native_batch_norm_legit_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, Tensor & running_mean, Tensor & running_var, bool training, double momentum, double eps, Tensor & out, Tensor & save_mean, Tensor & save_invstd); // {"schema": "aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))", "dispatch": "True", "default": "False"} +::std::tuple _native_batch_norm_legit(const Tensor & input, const c10::optional & weight, const c10::optional & bias, bool training, double momentum, double eps); // {"schema": "aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _native_batch_norm_legit_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, bool training, double momentum, double eps, Tensor & out, Tensor & save_mean, Tensor & save_invstd); // {"schema": "aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_stats(const Tensor & input, double eps); // {"schema": "aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor batch_norm_elemt(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & invstd, double eps); // {"schema": "aten::batch_norm_elemt(Tensor input, Tensor? 
weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & batch_norm_elemt_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & invstd, double eps, Tensor & out); // {"schema": "aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_gather_stats(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count); // {"schema": "aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_gather_stats_with_counts(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const Tensor & counts); // {"schema": "aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple native_batch_norm_backward(const Tensor & grad_out, const Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask); // {"schema": "aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_backward_reduce(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g); // {"schema": "aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor batch_norm_backward_elemt(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, const Tensor & sum_dy, const Tensor & sum_dy_xmu, const Tensor & count); // {"schema": "aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_update_stats(const Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum); // {"schema": "aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? 
running_var, float momentum) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +bool is_vulkan_available(); // {"schema": "aten::is_vulkan_available() -> bool", "dispatch": "False", "default": "True"} +bool _nnpack_available(); // {"schema": "aten::_nnpack_available() -> bool", "dispatch": "False", "default": "True"} +Tensor _nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride); // {"schema": "aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ones(IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ones(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ones_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor ones_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor pairwise_distance(const Tensor & x1, const Tensor & x2, double p, double eps, bool keepdim); // {"schema": "aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cdist(const Tensor & x1, const Tensor & x2, double p, c10::optional compute_mode); // {"schema": "aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _euclidean_dist(const Tensor & x1, const Tensor & x2); // {"schema": "aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _cdist_forward(const Tensor & x1, const Tensor & x2, double p, c10::optional compute_mode); // {"schema": "aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? 
compute_mode) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _cdist_backward(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist); // {"schema": "aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pdist(const Tensor & self, double p); // {"schema": "aten::pdist(Tensor self, float p=2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _pdist_forward(const Tensor & self, double p); // {"schema": "aten::_pdist_forward(Tensor self, float p=2) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _pdist_backward(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist); // {"schema": "aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cosine_similarity(const Tensor & x1, const Tensor & x2, int64_t dim, double eps); // {"schema": "aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor", "dispatch": "False", "default": "True"} +Tensor permute(const Tensor & self, IntArrayRef dims); // {"schema": "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor movedim(const Tensor & self, IntArrayRef source, IntArrayRef destination); // {"schema": "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor movedim(const Tensor & self, int64_t source, int64_t destination); // {"schema": "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor moveaxis(const Tensor & self, IntArrayRef source, IntArrayRef destination); // {"schema": "aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor moveaxis(const Tensor & self, int64_t source, int64_t destination); // {"schema": "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor numpy_T(const Tensor & self); // {"schema": "aten::numpy_T(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor matrix_H(const Tensor & self); // {"schema": "aten::matrix_H(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor mT(const Tensor & self); // {"schema": "aten::mT(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor mH(const Tensor & self); // {"schema": "aten::mH(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor adjoint(const Tensor & self); // {"schema": "aten::adjoint(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor pixel_shuffle(const Tensor & self, int64_t upscale_factor); // {"schema": "aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor", "dispatch": "True", "default": "True"} +Tensor pixel_unshuffle(const Tensor & self, int64_t downscale_factor); // {"schema": "aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor", "dispatch": "True", "default": "True"} +Tensor channel_shuffle(const Tensor & self, c10::SymInt groups); // {"schema": "aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor native_channel_shuffle(const Tensor & self, c10::SymInt groups); // {"schema": "aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor", "dispatch": 
"True", "default": "True"} +bool is_pinned(const Tensor & self, c10::optional device); // {"schema": "aten::is_pinned(Tensor self, Device? device=None) -> bool", "dispatch": "True", "default": "True"} +Tensor pin_memory(const Tensor & self, c10::optional device); // {"schema": "aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _pin_memory(const Tensor & self, c10::optional device); // {"schema": "aten::_pin_memory(Tensor self, Device? device=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pinverse(const Tensor & self, double rcond); // {"schema": "aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor", "dispatch": "False", "default": "True"} +Tensor poisson_nll_loss(const Tensor & input, const Tensor & target, bool log_input, bool full, double eps, int64_t reduction); // {"schema": "aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rad2deg(const Tensor & self); // {"schema": "aten::rad2deg(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rad2deg_(Tensor & self); // {"schema": "aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rad2deg_out(const Tensor & self, Tensor & out); // {"schema": "aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor deg2rad(const Tensor & self); // {"schema": "aten::deg2rad(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & deg2rad_(Tensor & self); // {"schema": "aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & deg2rad_out(const Tensor & self, Tensor & out); // {"schema": "aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor scalar_tensor(const Scalar & s, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rand_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor rand_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randint_out(c10::SymInt high, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor randint_like(const Tensor & self, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint_like(const Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randn_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & randn_out(c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor randn_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randperm(c10::SymInt n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randperm(c10::SymInt n, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randperm_out(c10::SymInt n, Tensor & out); // {"schema": "aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randperm_out(c10::SymInt n, c10::optional generator, Tensor & out); // {"schema": "aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor range(const Scalar & start, const Scalar & end, const Scalar & step, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor range(const Scalar & start, const Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & range_out(const Scalar & start, const Scalar & end, Tensor & out); // {"schema": "aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & range_out(const Scalar & start, const Scalar & end, const Scalar & step, Tensor & out); // {"schema": "aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ravel(const Tensor & self); // {"schema": "aten::ravel(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor reciprocal(const Tensor & self); // {"schema": "aten::reciprocal(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reciprocal_(Tensor & self); // {"schema": "aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & reciprocal_out(const Tensor & self, Tensor & out); // {"schema": "aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor neg(const Tensor & self); // {"schema": "aten::neg(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & neg_(Tensor & self); // {"schema": "aten::neg_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & neg_out(const Tensor & self, Tensor & out); // {"schema": "aten::neg.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor negative(const Tensor & self); // {"schema": "aten::negative(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & negative_(Tensor & self); // {"schema": "aten::negative_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & negative_out(const Tensor & self, Tensor & out); // {"schema": "aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor repeat(const Tensor & self, c10::SymIntArrayRef repeats); // {"schema": "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor repeat_interleave(const Tensor & repeats, c10::optional<c10::SymInt> output_size); // {"schema": "aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor repeat_interleave(const Tensor & self, const Tensor & repeats, c10::optional<int64_t> dim, c10::optional<c10::SymInt> output_size); // {"schema": "aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor repeat_interleave(const Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<c10::SymInt> output_size); // {"schema": "aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor reshape(const Tensor & self, c10::SymIntArrayRef shape); // {"schema": "aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)", "dispatch": "False", "default": "True"}
+Tensor _reshape_copy(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _reshape_alias(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor _mkldnn_reshape(const Tensor & self, IntArrayRef shape); // {"schema": "aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor reshape_as(const Tensor & self, const Tensor & other); // {"schema": "aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"}
+Tensor round(const Tensor & self); // {"schema": "aten::round(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & round_(Tensor & self); // {"schema": "aten::round_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & round_out(const Tensor & self, Tensor & out); // {"schema": "aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor round(const Tensor & self, int64_t decimals); // {"schema": "aten::round.decimals(Tensor self, *, int decimals) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & round_(Tensor & self, int64_t decimals); // {"schema": "aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & round_out(const Tensor & self, int64_t decimals, Tensor & out); // {"schema": "aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
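Again as an illustrative sketch only (not part of the generated header or of this diff), the shape-manipulation and rounding entries above (reshape, repeat_interleave, neg, round) correspond to plain at:: calls:

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(6).reshape({2, 3});                  // aten::arange + aten::reshape
  at::Tensor r = at::repeat_interleave(t, 2, /*dim=*/1);         // aten::repeat_interleave.self_int
  at::Tensor n = at::neg(t);                                     // aten::neg
  at::Tensor q = at::round(at::rand({4}) * 10, /*decimals=*/1);  // aten::round.decimals
  (void)r; (void)n; (void)q;
  return 0;
}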
+Tensor rrelu(const Tensor & self, const Scalar & lower, const Scalar & upper, bool training, c10::optional<Generator> generator); // {"schema": "aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & rrelu_(Tensor & self, const Scalar & lower, const Scalar & upper, bool training, c10::optional<Generator> generator); // {"schema": "aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor relu(const Tensor & self); // {"schema": "aten::relu(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & relu_(Tensor & self); // {"schema": "aten::relu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor relu6(const Tensor & self); // {"schema": "aten::relu6(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & relu6_(Tensor & self); // {"schema": "aten::relu6_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor prelu(const Tensor & self, const Tensor & weight); // {"schema": "aten::prelu(Tensor self, Tensor weight) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _prelu_kernel(const Tensor & self, const Tensor & weight); // {"schema": "aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor> _prelu_kernel_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight); // {"schema": "aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"}
+Tensor & gelu_out(const Tensor & self, c10::string_view approximate, Tensor & out); // {"schema": "aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & gelu_(Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor gelu(const Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu(Tensor self, *, str approximate='none') -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & gelu_backward_out(const Tensor & grad_output, const Tensor & self, c10::string_view approximate, Tensor & grad_input); // {"schema": "aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor gelu_backward(const Tensor & grad_output, const Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor", "dispatch": "True", "default": "True"}
+Tensor infinitely_differentiable_gelu_backward(const Tensor & grad, const Tensor & self); // {"schema": "aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & hardshrink_out(const Tensor & self, const Scalar & lambd, Tensor & out); // {"schema": "aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardshrink(const Tensor & self, const Scalar & lambd); // {"schema": "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardshrink_backward_out(const Tensor & grad_out, const Tensor & self, const Scalar & lambd, Tensor & grad_input); // {"schema": "aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardshrink_backward(const Tensor & grad_out, const Tensor & self, const Scalar & lambd); // {"schema": "aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rsqrt(const Tensor & self); // {"schema": "aten::rsqrt(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rsqrt_(Tensor & self); // {"schema": "aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rsqrt_out(const Tensor & self, Tensor & out); // {"schema": "aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor select(const Tensor & self, Dimname dim, int64_t index); // {"schema": "aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor select(const Tensor & self, int64_t dim, c10::SymInt index); // {"schema": "aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor select_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index); // {"schema": "aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _nested_select_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, c10::SymInt index); // {"schema": "aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor selu(const Tensor & self); // {"schema": "aten::selu(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & selu_(Tensor & self); // {"schema": "aten::selu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor celu(const Tensor & self, const Scalar & alpha); // {"schema": "aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & celu_(Tensor & self, const Scalar & alpha); // {"schema": "aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor silu(const Tensor & self); // {"schema": "aten::silu(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & silu_(Tensor & self); // {"schema": "aten::silu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & silu_out(const Tensor & self, Tensor & out); // {"schema": "aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & silu_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor silu_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor mish(const Tensor & self); // {"schema": "aten::mish(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & mish_(Tensor & self); // {"schema": "aten::mish_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & mish_out(const Tensor & self, Tensor & out); // {"schema": "aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor mish_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor sigmoid(const Tensor & self); // {"schema": "aten::sigmoid(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sigmoid_(Tensor & self); // {"schema": "aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor logit(const Tensor & self, c10::optional<double> eps); // {"schema": "aten::logit(Tensor self, float? eps=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & logit_(Tensor & self, c10::optional<double> eps); // {"schema": "aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & logit_out(const Tensor & self, c10::optional<double> eps, Tensor & out); // {"schema": "aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sin(const Tensor & self); // {"schema": "aten::sin(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sin_(Tensor & self); // {"schema": "aten::sin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sin_out(const Tensor & self, Tensor & out); // {"schema": "aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sinc(const Tensor & self); // {"schema": "aten::sinc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sinc_(Tensor & self); // {"schema": "aten::sinc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sinc_out(const Tensor & self, Tensor & out); // {"schema": "aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sinh(const Tensor & self); // {"schema": "aten::sinh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sinh_(Tensor & self); // {"schema": "aten::sinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor detach(const Tensor & self); // {"schema": "aten::detach(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor & detach_(Tensor & self); // {"schema": "aten::detach_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
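One more hedged sketch (again illustrative only, not part of the generated file): the activation, view, and softmax ops declared around here are typically combined like this from C++:

#include <ATen/ATen.h>

int main() {
  at::Tensor logits = at::randn({4, 8});
  at::Tensor probs  = at::softmax(logits, /*dim=*/-1);                // aten::softmax.int
  at::Tensor head   = probs.slice(/*dim=*/0, /*start=*/0, /*end=*/2); // aten::slice.Tensor
  at::Tensor frozen = at::sigmoid(logits).detach();                   // aten::sigmoid + aten::detach
  (void)head; (void)frozen;
  return 0;
}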
+int64_t size(const Tensor & self, int64_t dim); // {"schema": "aten::size.int(Tensor self, int dim) -> int", "dispatch": "False", "default": "True"}
+int64_t size(const Tensor & self, Dimname dim); // {"schema": "aten::size.Dimname(Tensor self, Dimname dim) -> int", "dispatch": "False", "default": "True"}
+c10::SymInt sym_size(const Tensor & self, int64_t dim); // {"schema": "aten::sym_size.int(Tensor self, int dim) -> SymInt", "dispatch": "False", "default": "True"}
+c10::SymInt sym_numel(const Tensor & self); // {"schema": "aten::sym_numel(Tensor self) -> SymInt", "dispatch": "False", "default": "True"}
+c10::SymInt sym_storage_offset(const Tensor & self); // {"schema": "aten::sym_storage_offset(Tensor self) -> SymInt", "dispatch": "False", "default": "True"}
+Tensor slice(const Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step); // {"schema": "aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor slice_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step); // {"schema": "aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor slice_inverse(const Tensor & self, const Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step); // {"schema": "aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor slice_scatter(const Tensor & self, const Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step); // {"schema": "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor select_scatter(const Tensor & self, const Tensor & src, int64_t dim, c10::SymInt index); // {"schema": "aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor diagonal_scatter(const Tensor & self, const Tensor & src, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor as_strided_scatter(const Tensor & self, const Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset); // {"schema": "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor smm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::smm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor softmax(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype); // {"schema": "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & softmax_out(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype); // {"schema": "aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype, Tensor & grad_input); // {"schema": "aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector unsafe_split(const Tensor & self, c10::SymInt split_size, int64_t dim); // {"schema": "aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector split(const Tensor & self, c10::SymInt split_size, int64_t dim); // {"schema": "aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"} +::std::vector split(const Tensor & self, c10::SymIntArrayRef split_size, int64_t dim); // {"schema": "aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector unsafe_split_with_sizes(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim); // {"schema": "aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector split_with_sizes(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim); // {"schema": "aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"} +::std::vector hsplit(const Tensor & self, int64_t sections); // {"schema": "aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector hsplit(const Tensor & self, IntArrayRef indices); // {"schema": "aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector vsplit(const Tensor & self, int64_t sections); // {"schema": "aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector vsplit(const Tensor & self, IntArrayRef indices); // {"schema": "aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector dsplit(const Tensor & self, int64_t sections); // {"schema": "aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector dsplit(const Tensor & self, IntArrayRef indices); // {"schema": 
"aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +Tensor squeeze(const Tensor & self); // {"schema": "aten::squeeze(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor squeeze(const Tensor & self, int64_t dim); // {"schema": "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor squeeze(const Tensor & self, Dimname dim); // {"schema": "aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor squeeze(const Tensor & self, IntArrayRef dim); // {"schema": "aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self); // {"schema": "aten::squeeze_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self, int64_t dim); // {"schema": "aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self, IntArrayRef dim); // {"schema": "aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self, Dimname dim); // {"schema": "aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor sspaddmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & sspaddmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _chunk_cat(TensorList tensors, int64_t dim, int64_t num_chunks); // {"schema": "aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _chunk_cat_out(TensorList tensors, int64_t dim, int64_t num_chunks, Tensor & out); // {"schema": "aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor stack(TensorList tensors, int64_t dim); // {"schema": "aten::stack(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & stack_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _stack(TensorList tensors, int64_t dim); // {"schema": "aten::_stack(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _stack_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor hstack(TensorList tensors); // {"schema": "aten::hstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & hstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::hstack.out(Tensor[] tensors, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor vstack(TensorList tensors); // {"schema": "aten::vstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & vstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor dstack(TensorList tensors); // {"schema": "aten::dstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & dstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor stft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool normalized, c10::optional onesided, c10::optional return_complex); // {"schema": "aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor stft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional onesided, c10::optional return_complex); // {"schema": "aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor istft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool center, bool normalized, c10::optional onesided, c10::optional length, bool return_complex); // {"schema": "aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor", "dispatch": "False", "default": "True"} +int64_t stride(const Tensor & self, int64_t dim); // {"schema": "aten::stride.int(Tensor self, int dim) -> int", "dispatch": "False", "default": "True"} +int64_t stride(const Tensor & self, Dimname dim); // {"schema": "aten::stride.Dimname(Tensor self, Dimname dim) -> int", "dispatch": "False", "default": "True"} +c10::SymInt sym_stride(const Tensor & self, int64_t dim); // {"schema": "aten::sym_stride.int(Tensor self, int dim) -> SymInt", "dispatch": "False", "default": "True"} +Tensor sum(const Tensor & self, c10::optional dtype); // {"schema": "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sum(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sum(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype); // {"schema": "aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & sum_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & sum_out(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor _nested_sum_backward(const Tensor & grad, const Tensor & self, OptionalIntArrayRef dim, bool keepdim); // {"schema": "aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor nansum(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & nansum_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sum_to_size(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sqrt(const Tensor & self); // {"schema": "aten::sqrt(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sqrt_(Tensor & self); // {"schema": "aten::sqrt_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sqrt_out(const Tensor & self, Tensor & out); // {"schema": "aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor square(const Tensor & self); // {"schema": "aten::square(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & square_(Tensor & self); // {"schema": "aten::square_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & square_out(const Tensor & self, Tensor & out); // {"schema": "aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor std(const Tensor & self, bool unbiased); // {"schema": "aten::std(Tensor self, bool unbiased=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor std(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor std(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple std_mean(const Tensor & self, bool unbiased); // {"schema": "aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple std_mean(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple std_mean(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::std_mean.correction(Tensor self, int[1]? 
dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple std_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple std_mean(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor & std_out(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & std_out(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, Tensor & out); // {"schema": "aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor std(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & std_out(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor std(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & std_out(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim, Tensor & out); // {"schema": "aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor prod(const Tensor & self, c10::optional dtype); // {"schema": "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor prod(const Tensor & self, int64_t dim, bool keepdim, c10::optional dtype); // {"schema": "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & prod_out(const Tensor & self, int64_t dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor prod(const Tensor & self, Dimname dim, bool keepdim, c10::optional dtype); // {"schema": "aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & prod_out(const Tensor & self, Dimname dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor t(const Tensor & self); // {"schema": "aten::t(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & t_(Tensor & self); // {"schema": "aten::t_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor tan(const Tensor & self); // {"schema": "aten::tan(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tan_(Tensor & self); // {"schema": "aten::tan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & tan_out(const Tensor & self, Tensor & out); // {"schema": "aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tanh(const Tensor & self); // {"schema": "aten::tanh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tanh_(Tensor & self); // {"schema": "aten::tanh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & tanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tensordot(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other); // {"schema": "aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & tensordot_out(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other, Tensor & out); // {"schema": "aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor threshold(const Tensor & self, const Scalar & threshold, const Scalar & value); // {"schema": "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & threshold_(Tensor & self, const Scalar & threshold, const Scalar & value); // {"schema": "aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & threshold_out(const Tensor & self, const Scalar & threshold, const Scalar & value, Tensor & out); // {"schema": "aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & threshold_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & threshold, Tensor & grad_input); // {"schema": "aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor threshold_backward(const Tensor & grad_output, const Tensor & self, const Scalar & threshold); // {"schema": "aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor", "dispatch": "True", "default": "True"} +Tensor tile(const Tensor & self, c10::SymIntArrayRef dims); // {"schema": "aten::tile(Tensor self, SymInt[] dims) -> Tensor", "dispatch": "False", "default": "True"} +Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor transpose(const Tensor & self, Dimname dim0, Dimname dim1); // {"schema": "aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _mkldnn_transpose(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & transpose_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mkldnn_transpose_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor one_hot(const Tensor & self, int64_t num_classes); // {"schema": "aten::one_hot(Tensor self, int num_classes=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor flip(const Tensor & self, IntArrayRef dims); // {"schema": "aten::flip(Tensor self, int[] dims) -> Tensor", "dispatch": "True", "default": "False"} +Tensor fliplr(const Tensor & self); // {"schema": "aten::fliplr(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor flipud(const Tensor & self); // {"schema": "aten::flipud(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor roll(const Tensor & self, c10::SymIntArrayRef shifts, IntArrayRef dims); // {"schema": "aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor", "dispatch": "True", "default": "False"} +Tensor rot90(const Tensor & self, int64_t k, IntArrayRef dims); // {"schema": "aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor", "dispatch": "True", "default": "True"} +Tensor trapezoid(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor trapezoid(const Tensor & y, const Scalar & dx, int64_t dim); // {"schema": "aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor trapz(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor trapz(const Tensor & y, double dx, int64_t dim); // {"schema": "aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _transform_bias_rescale_qkv(const Tensor & qkv, const Tensor & qkv_bias, int64_t num_heads); // {"schema": "aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_from_mask(const Tensor & t, const Tensor & mask, bool mask_check); // 
{"schema": "aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor", "dispatch": "True", "default": "False"} +bool _nested_tensor_from_mask_left_aligned(const Tensor & t, const Tensor & mask); // {"schema": "aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool", "dispatch": "True", "default": "False"} +Tensor _nested_from_padded(const Tensor & padded, const Tensor & cpu_nested_shape_example, bool fuse_transform_0213); // {"schema": "aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_size(const Tensor & self); // {"schema": "aten::_nested_tensor_size(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_strides(const Tensor & self); // {"schema": "aten::_nested_tensor_strides(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_storage_offsets(const Tensor & self); // {"schema": "aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_from_padded_and_nested_example(const Tensor & padded, const Tensor & nt_example); // {"schema": "aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_view_from_buffer(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, const Tensor & offsets); // {"schema": "aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _nested_view_from_buffer_copy(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, const Tensor & offsets); // {"schema": "aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _nested_view_from_jagged(const Tensor & self, const Tensor & offsets, const Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx); // {"schema": "aten::_nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _nested_view_from_jagged_copy(const Tensor & self, const Tensor & offsets, const Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx); // {"schema": "aten::_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? 
lengths=None, int ragged_idx=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _nested_get_values(const Tensor & self); // {"schema": "aten::_nested_get_values(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _nested_get_values_copy(const Tensor & self); // {"schema": "aten::_nested_get_values_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _nested_get_offsets(const Tensor & self); // {"schema": "aten::_nested_get_offsets(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_get_lengths(const Tensor & self); // {"schema": "aten::_nested_get_lengths(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +int64_t _nested_get_ragged_idx(const Tensor & self); // {"schema": "aten::_nested_get_ragged_idx(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor _nested_get_jagged_dummy(const Tensor & any); // {"schema": "aten::_nested_get_jagged_dummy(Tensor any) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _trilinear(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim); // {"schema": "aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor triplet_margin_loss(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction); // {"schema": "aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor trunc(const Tensor & self); // {"schema": "aten::trunc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & trunc_(Tensor & self); // {"schema": "aten::trunc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & trunc_out(const Tensor & self, Tensor & out); // {"schema": "aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fix(const Tensor & self); // {"schema": "aten::fix(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fix_(Tensor & self); // {"schema": "aten::fix_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & fix_out(const Tensor & self, Tensor & out); // {"schema": "aten::fix.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor type_as(const Tensor & self, const Tensor & other); // {"schema": "aten::type_as(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +bool _has_compatible_shallow_copy_type(const Tensor & self, const Tensor & from); // {"schema": "aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool", "dispatch": "False", "default": "True"} +::std::tuple _unique(const Tensor & self, bool sorted, bool return_inverse); // {"schema": "aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple unique_dim(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts); // {"schema": "aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple unique_consecutive(const Tensor & self, bool return_inverse, bool return_counts, c10::optional dim); // {"schema": "aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple unique_dim_consecutive(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts); // {"schema": "aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _unique2(const Tensor & self, bool sorted, bool return_inverse, bool return_counts); // {"schema": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _unsafe_view(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor unsqueeze(const Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & unsqueeze_(Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor vander(const Tensor & x, c10::optional N, bool increasing); // {"schema": "aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor var(const Tensor & self, bool unbiased); // {"schema": "aten::var(Tensor self, bool unbiased=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor var(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor var(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & var_out(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & var_out(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, Tensor & out); // {"schema": "aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor var(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & var_out(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor var(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & var_out(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim, Tensor & out); // {"schema": "aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple var_mean(const Tensor & self, bool unbiased); // {"schema": "aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple var_mean(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple var_mean(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple var_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple var_mean(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor view_as(const Tensor & self, const Tensor & other); // {"schema": "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor where(const Tensor & condition, const Tensor & self, const Tensor & other); // {"schema": "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & where_out(const Tensor & condition, const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor where(const Tensor & condition, const Scalar & self, const Tensor & other); // {"schema": "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor where(const Tensor & condition, const Tensor & self, const Scalar & other); // {"schema": "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor where(const Tensor & condition, const Scalar & self, const Scalar & other); // {"schema": "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector where(const Tensor & condition); // {"schema": "aten::where(Tensor condition) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor norm_except_dim(const Tensor & v, int64_t pow, int64_t dim); // {"schema": "aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _weight_norm(const Tensor & v, const Tensor & g, int64_t dim); // {"schema": "aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _weight_norm_interface(const Tensor & v, const Tensor & g, int64_t dim); // {"schema": "aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _weight_norm_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); // {"schema": "aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _weight_norm_differentiable_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); // {"schema": "aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor zeros(IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _efficientzerotensor(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor zeros(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & zeros_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::zeros.out(SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor zeros_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output); // {"schema": "aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _standard_gamma(const Tensor & self, c10::optional generator); // {"schema": "aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total); // {"schema": "aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sample_dirichlet(const Tensor & self, c10::optional generator); // {"schema": "aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor poisson(const Tensor & self, c10::optional generator); // {"schema": "aten::poisson(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor binomial(const Tensor & count, const Tensor & prob, c10::optional generator); // {"schema": "aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor native_norm(const Tensor & self, const Scalar & p); // {"schema": "aten::native_norm(Tensor self, Scalar p=2) -> Tensor", "dispatch": "True", "default": "False"} +Tensor native_norm(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_sum(const Tensor & self); // {"schema": "aten::_sparse_sum(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sum(const Tensor & self, ScalarType dtype); // {"schema": "aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sum(const Tensor & self, IntArrayRef dim); // {"schema": "aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _sparse_sum(const Tensor & self, IntArrayRef dim, ScalarType dtype); // {"schema": "aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sum_backward(const Tensor & grad, const Tensor & self, IntArrayRef dim); // {"schema": "aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_csr_sum(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_csr_prod(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); // {"schema": "aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_log_softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_log_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); // {"schema": "aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _spdiags(const Tensor & diagonals, const Tensor & offsets, IntArrayRef shape, c10::optional layout); // {"schema": "aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor norm(const Tensor & self, const c10::optional & p, ScalarType dtype); // {"schema": "aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const Scalar & p); // {"schema": "aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, ScalarType dtype); // {"schema": "aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim); // {"schema": "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor norm(const Tensor & self, const c10::optional & p, DimnameList dim, bool keepdim, ScalarType dtype); // {"schema": "aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"} +Tensor norm(const Tensor & self, const c10::optional & p, DimnameList dim, bool keepdim); // {"schema": "aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, DimnameList dim, bool keepdim, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, DimnameList dim, bool keepdim, Tensor & out); // {"schema": "aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple frexp(const Tensor & self); // {"schema": "aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)", "dispatch": "True", "default": "True"} +::std::tuple frexp_out(const Tensor & self, Tensor & mantissa, Tensor & exponent); // {"schema": "aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)", "dispatch": "True", "default": "False"} +Tensor frobenius_norm(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & frobenius_norm_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nuclear_norm(const Tensor & self, bool keepdim); // {"schema": "aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nuclear_norm_out(const Tensor & self, bool keepdim, Tensor & out); // {"schema": "aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nuclear_norm(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nuclear_norm_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor clone(const Tensor & self, c10::optional memory_format); // {"schema": "aten::clone(Tensor self, *, MemoryFormat? 
memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor positive(const Tensor & self); // {"schema": "aten::positive(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +const Tensor & resize_as_(const Tensor & self, const Tensor & the_template, c10::optional memory_format); // {"schema": "aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & resize_as_sparse_(const Tensor & self, const Tensor & the_template); // {"schema": "aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & zero_(Tensor & self); // {"schema": "aten::zero_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & sub_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sub(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sub_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor sub(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sub_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & subtract_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor subtract(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & subtract_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor subtract(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & subtract_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor rsub(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & heaviside_out(const Tensor & self, const Tensor & values, Tensor & out); // {"schema": "aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor heaviside(const Tensor & self, const Tensor & values); // {"schema": "aten::heaviside(Tensor self, Tensor values) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & heaviside_(Tensor & self, const Tensor & values); // {"schema": "aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor rsub(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _sparse_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sparse_sampled_addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sparse_sampled_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _sparse_mm_reduce_impl(const Tensor & self, const Tensor & other, c10::string_view reduce); // {"schema": "aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _sparse_mm_reduce_impl_backward(const Tensor & self, const Tensor & grad_out, const Tensor & weight, c10::string_view reduce, const Tensor & arg_out, ::std::array output_mask); // {"schema": "aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _addmm_activation_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, bool use_gelu, Tensor & out); // {"schema": "aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _addmm_activation(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, bool use_gelu); // {"schema": "aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple _scaled_mm(const Tensor & self, const Tensor & mat2, const c10::optional & bias, c10::optional out_dtype, const c10::optional & scale_a, const c10::optional & scale_b, const c10::optional & scale_result, bool use_fast_accum); // {"schema": "aten::_scaled_mm(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_mm_out(const Tensor & self, const Tensor & mat2, const c10::optional & bias, c10::optional out_dtype, const c10::optional & scale_a, const c10::optional & scale_b, const c10::optional & scale_result, bool use_fast_accum, Tensor & out, Tensor & out_amax); // {"schema": "aten::_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +Tensor sparse_compressed_tensor(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sparse_csr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_csc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_bsr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_bsc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_compressed_tensor(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sparse_csr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_csc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_bsr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_bsc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_compressed_tensor_unsafe(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_csr_tensor_unsafe(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_csc_tensor_unsafe(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_bsr_tensor_unsafe(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_bsc_tensor_unsafe(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_coo_tensor(IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced); // {"schema": "aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced); // {"schema": "aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced); // {"schema": "aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor", "dispatch": "False", "default": "True"} +void _validate_sparse_coo_tensor_args(const Tensor & indices, const Tensor & values, IntArrayRef size, c10::optional is_coalesced); // {"schema": "aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_compressed_tensor_args(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, IntArrayRef size, Layout layout); // {"schema": "aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_csr_tensor_args(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_csc_tensor_args(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_bsr_tensor_args(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_bsc_tensor_args(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"} +Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const Tensor & indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced); // {"schema": "aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? 
is_coalesced=None) -> Tensor", "dispatch": "True", "default": "False"} +const Tensor & sparse_resize_(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)", "dispatch": "True", "default": "False"} +const Tensor & sparse_resize_and_clear_(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sparse_mask(const Tensor & self, const Tensor & mask); // {"schema": "aten::sparse_mask(Tensor self, Tensor mask) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_mask_projection(const Tensor & self, const Tensor & mask, bool accumulate_matches); // {"schema": "aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector _to_cpu(TensorList tensors); // {"schema": "aten::_to_cpu(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor to_dense(const Tensor & self, c10::optional dtype, c10::optional masked_grad); // {"schema": "aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_dense(const Tensor & self, c10::optional dtype, c10::optional masked_grad); // {"schema": "aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_dense_backward(const Tensor & grad, const Tensor & input, c10::optional masked_grad); // {"schema": "aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor", "dispatch": "False", "default": "True"} +int64_t sparse_dim(const Tensor & self); // {"schema": "aten::sparse_dim(Tensor self) -> int", "dispatch": "True", "default": "False"} +int64_t _dimI(const Tensor & self); // {"schema": "aten::_dimI(Tensor self) -> int", "dispatch": "True", "default": "False"} +int64_t dense_dim(const Tensor & self); // {"schema": "aten::dense_dim(Tensor self) -> int", "dispatch": "True", "default": "False"} +int64_t _dimV(const Tensor & self); // {"schema": "aten::_dimV(Tensor self) -> int", "dispatch": "True", "default": "False"} +int64_t _nnz(const Tensor & self); // {"schema": "aten::_nnz(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor coalesce(const Tensor & self); // {"schema": "aten::coalesce(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _coalesce(const Tensor & self); // {"schema": "aten::_coalesce(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +bool is_coalesced(const Tensor & self); // {"schema": "aten::is_coalesced(Tensor self) -> bool", "dispatch": "True", "default": "True"} +Tensor _indices(const Tensor & self); // {"schema": "aten::_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _values(const Tensor & self); // {"schema": "aten::_values(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor & _coalesced_(Tensor & self, bool coalesced); // {"schema": "aten::_coalesced_(Tensor(a!) 
self, bool coalesced) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor indices(const Tensor & self); // {"schema": "aten::indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor values(const Tensor & self); // {"schema": "aten::values(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor crow_indices(const Tensor & self); // {"schema": "aten::crow_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor col_indices(const Tensor & self); // {"schema": "aten::col_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor ccol_indices(const Tensor & self); // {"schema": "aten::ccol_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor row_indices(const Tensor & self); // {"schema": "aten::row_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & hspmm_out(const Tensor & mat1, const Tensor & mat2, Tensor & out); // {"schema": "aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hspmm(const Tensor & mat1, const Tensor & mat2); // {"schema": "aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & copy_sparse_to_sparse_(Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector unbind(const Tensor & self, int64_t dim); // {"schema": "aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"} +::std::vector unbind(const Tensor & self, Dimname dim); // {"schema": "aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +Tensor to_sparse(const Tensor & self, int64_t sparse_dim); // {"schema": "aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_sparse(const Tensor & self, int64_t sparse_dim); // {"schema": "aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_sparse(const Tensor & self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_sparse(const Tensor & self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_sparse_csr(const Tensor & self, c10::optional dense_dim); // {"schema": "aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_sparse_csr(const Tensor & self, c10::optional dense_dim); // {"schema": "aten::_to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_sparse_csc(const Tensor & self, c10::optional dense_dim); // {"schema": "aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_sparse_csc(const Tensor & self, c10::optional dense_dim); // {"schema": "aten::_to_sparse_csc(Tensor self, int? 
dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_sparse_bsr(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_sparse_bsr(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_sparse_bsc(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_sparse_bsc(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _to_sparse_semi_structured(const Tensor & dense); // {"schema": "aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor to_mkldnn(const Tensor & self, c10::optional dtype); // {"schema": "aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_reorder_conv2d_weight(const Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, OptionalSymIntArrayRef input_size); // {"schema": "aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_reorder_conv3d_weight(const Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_mkldnn_backward(const Tensor & grad, const Tensor & input); // {"schema": "aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantize_per_tensor_dynamic(const Tensor & self, ScalarType dtype, bool reduce_range); // {"schema": "aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantize_per_tensor(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype); // {"schema": "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantize_per_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, ScalarType dtype); // {"schema": "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector quantize_per_tensor(TensorList tensors, const Tensor & scales, const Tensor & zero_points, ScalarType dtype); // {"schema": "aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]", "dispatch": "True", "default": "False"} +Tensor quantize_per_channel(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType 
dtype); // {"schema": "aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"} +Tensor dequantize(const Tensor & self); // {"schema": "aten::dequantize.self(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector dequantize(TensorList tensors); // {"schema": "aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "True", "default": "False"} +double q_scale(const Tensor & self); // {"schema": "aten::q_scale(Tensor self) -> float", "dispatch": "True", "default": "False"} +int64_t q_zero_point(const Tensor & self); // {"schema": "aten::q_zero_point(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor q_per_channel_scales(const Tensor & self); // {"schema": "aten::q_per_channel_scales(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor q_per_channel_zero_points(const Tensor & self); // {"schema": "aten::q_per_channel_zero_points(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +int64_t q_per_channel_axis(const Tensor & self); // {"schema": "aten::q_per_channel_axis(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor int_repr(const Tensor & self); // {"schema": "aten::int_repr(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _make_per_tensor_quantized_tensor(const Tensor & self, double scale, int64_t zero_point); // {"schema": "aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _make_per_channel_quantized_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis); // {"schema": "aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor", "dispatch": "True", "default": "False"} +QScheme qscheme(const Tensor & self); // {"schema": "aten::qscheme(Tensor self) -> QScheme", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_tensor_affine(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fake_quantize_per_tensor_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fake_quantize_per_tensor_affine_cachemask(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const Tensor & self, const Tensor & scale, const Tensor & zero_point, const Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max); // {"schema": "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +Tensor 
fake_quantize_per_tensor_affine_cachemask_backward(const Tensor & grad, const Tensor & mask); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _fake_quantize_learnable_per_tensor_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_learnable_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fake_quantize_per_channel_affine_cachemask(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_channel_affine_cachemask_backward(const Tensor & grad, const Tensor & mask); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _fake_quantize_learnable_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_learnable_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor fused_moving_avg_obs_fake_quant(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) 
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _fused_moving_avg_obs_fq_helper(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +::std::tuple _choose_qparams_per_tensor(const Tensor & self, bool reduce_range); // {"schema": "aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)", "dispatch": "False", "default": "True"} +Tensor _saturate_weight_to_fp16(const Tensor & weight); // {"schema": "aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple choose_qparams_optimized(const Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width); // {"schema": "aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor _autocast_to_reduced_precision(const Tensor & self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype); // {"schema": "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _autocast_to_full_precision(const Tensor & self, bool cuda_enabled, bool cpu_enabled); // {"schema": "aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _to_copy(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, c10::optional memory_format); // {"schema": "aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor to(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, const Tensor & other, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +::std::vector meshgrid(TensorList tensors); // {"schema": "aten::meshgrid(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector meshgrid(TensorList tensors, c10::string_view indexing); // {"schema": "aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor cartesian_prod(TensorList tensors); // {"schema": "aten::cartesian_prod(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor combinations(const Tensor & self, int64_t r, bool with_replacement); // {"schema": "aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor", "dispatch": "False", "default": "True"} +Scalar item(const Tensor & self); // {"schema": "aten::item(Tensor self) -> Scalar", "dispatch": "False", "default": "True"} +ScalarType result_type(const Tensor & tensor, const Tensor & other); // {"schema": "aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Tensor & tensor, const Scalar & other); // {"schema": "aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Scalar & scalar, const Tensor & tensor); // {"schema": "aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Scalar & scalar1, const Scalar & scalar2); // {"schema": "aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType", "dispatch": "False", "default": "True"} +bool can_cast(ScalarType from, ScalarType to); // {"schema": "aten::can_cast(ScalarType from, ScalarType to) -> bool", "dispatch": "False", "default": "True"} +ScalarType promote_types(ScalarType type1, ScalarType type2); // {"schema": "aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType", "dispatch": "False", "default": "True"} +Scalar _local_scalar_dense(const Tensor & self); // {"schema": "aten::_local_scalar_dense(Tensor self) -> Scalar", "dispatch": "True", "default": "False"} +::std::tuple _lstm_mps(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple,::std::vector> lstm_mps_backward(const c10::optional & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & z_state, const Tensor & cell_state_fwd, const Tensor & input, const Tensor & layersOutputs, TensorList hx, TensorList 
params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell_backward_impl(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell_backward(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _thnn_differentiable_lstm_cell_backward(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & input_gates, const Tensor & hidden_gates, const c10::optional & input_bias, const c10::optional & hidden_bias, const Tensor & cx, const Tensor & cy); // {"schema": "aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _thnn_fused_gru_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_gru_cell_backward(const Tensor & grad_hy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_differentiable_gru_cell_backward(const Tensor & grad_hy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? 
hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm(const Tensor & data, const Tensor & batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_tanh(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_tanh(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_relu(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_relu(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm_cell(const Tensor & 
input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple quantized_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor quantized_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const 
Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _pack_padded_sequence(const Tensor & input, const Tensor & lengths, bool batch_first); // {"schema": "aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor _pack_padded_sequence_backward(const Tensor & grad, c10::SymIntArrayRef input_size, const Tensor & batch_sizes, bool batch_first); // {"schema": "aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _pad_packed_sequence(const Tensor & data, const Tensor & batch_sizes, bool batch_first, const Scalar & padding_value, int64_t total_length); // {"schema": "aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor & set_(Tensor & self, Storage source); // {"schema": "aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self, const Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & set_(Tensor & self, const Tensor & source); // {"schema": "aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self); // {"schema": "aten::set_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lift(const Tensor & self); // {"schema": "aten::lift(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor lift_fresh(const Tensor & self); // {"schema": "aten::lift_fresh(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor lift_fresh_copy(const Tensor & self); // {"schema": "aten::lift_fresh_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +bool is_set_to(const Tensor & self, const Tensor & tensor); // {"schema": "aten::is_set_to(Tensor self, Tensor tensor) -> bool", "dispatch": "True", "default": "False"} +Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Scalar & value); // {"schema": "aten::masked_fill_.Scalar(Tensor(a!) 
self, Tensor mask, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_fill(const Tensor & self, const Tensor & mask, const Scalar & value); // {"schema": "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value); // {"schema": "aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_fill(const Tensor & self, const Tensor & mask, const Tensor & value); // {"schema": "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source); // {"schema": "aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_scatter(const Tensor & self, const Tensor & mask, const Tensor & source); // {"schema": "aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor masked_scatter_backward(const Tensor & grad_output, const Tensor & mask, c10::SymIntArrayRef sizes); // {"schema": "aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _masked_softmax(const Tensor & self, const Tensor & mask, c10::optional dim, c10::optional mask_type); // {"schema": "aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _masked_softmax_backward(const Tensor & grad_output, const Tensor & output, const Tensor & mask, c10::optional dim); // {"schema": "aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor view(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor view(const Tensor & self, ScalarType dtype); // {"schema": "aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate); // {"schema": "aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor put(const Tensor & self, const Tensor & index, const Tensor & source, bool accumulate); // {"schema": "aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_add_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha, Tensor & out); // {"schema": "aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add_(Tensor(a!) 
self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor index_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & index_reduce_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self, Tensor & out); // {"schema": "aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & index_reduce_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self); // {"schema": "aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_reduce(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self); // {"schema": "aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_fill_(Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & index_fill_(Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill_.Dimname_Tensor(Tensor(a!) 
self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", "dispatch": "False", "default": "True"} +Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor", "dispatch": "False", "default": "True"} +Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, Tensor & out); // {"schema": "aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, Tensor & out); // {"schema": "aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce); // {"schema": "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce); // {"schema": "aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, Tensor & out); // {"schema": "aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce); // {"schema": "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce); // {"schema": "aten::scatter_.value_reduce(Tensor(a!) 
self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce, Tensor & out); // {"schema": "aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", "dispatch": "False", "default": "True"} +Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", "dispatch": "False", "default": "True"} +Tensor scatter_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_add_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, Tensor & out); // {"schema": "aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", "dispatch": "False", "default": "True"} +Tensor scatter_reduce(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self); // {"schema": "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_reduce_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self); // {"schema": "aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_reduce_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self, Tensor & out); // {"schema": "aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & eq_(Tensor & self, const Scalar & other); // {"schema": "aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & eq_(Tensor & self, const Tensor & other); // {"schema": "aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_and_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __and__(const Tensor & self, const Scalar & other); // {"schema": "aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __and__(const Tensor & self, const Tensor & other); // {"schema": "aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __iand__(Tensor & self, const Scalar & other); // {"schema": "aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __iand__(Tensor & self, const Tensor & other); // {"schema": "aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_or_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_or_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_or(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_or(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_or(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_or_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_or_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_or_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __or__(const Tensor & self, const Scalar & other); // {"schema": "aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __or__(const Tensor & self, const Tensor & other); // {"schema": "aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __ior__(Tensor & self, const Scalar & other); // {"schema": "aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __ior__(Tensor & self, const Tensor & other); // {"schema": "aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_xor_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_xor_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_xor(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_xor(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_xor(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_xor_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_xor_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __xor__(const Tensor & self, const Scalar & other); // {"schema": "aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __xor__(const Tensor & self, const Tensor & other); // {"schema": "aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __ixor__(Tensor & self, const Scalar & other); // {"schema": "aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __ixor__(Tensor & self, const Tensor & other); // {"schema": "aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor __lshift__(const Tensor & self, const Scalar & other); // {"schema": "aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor __lshift__(const Tensor & self, const Tensor & other); // {"schema": "aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & __ilshift__(Tensor & self, const Scalar & other); // {"schema": "aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & __ilshift__(Tensor & self, const Tensor & other); // {"schema": "aten::__ilshift__.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_left_shift(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_left_shift(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_left_shift(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor __rshift__(const Tensor & self, const Scalar & other); // {"schema": "aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor __rshift__(const Tensor & self, const Tensor & other); // {"schema": "aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & __irshift__(Tensor & self, const Scalar & other); // {"schema": "aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & __irshift__(Tensor & self, const Tensor & other); // {"schema": "aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_right_shift(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_right_shift(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_right_shift(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tril_(Tensor & self, int64_t diagonal); // {"schema": "aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & triu_(Tensor & self, int64_t diagonal); // {"schema": "aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & digamma_(Tensor & self); // {"schema": "aten::digamma_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lerp_(Tensor & self, const Tensor & end, const Scalar & weight); // {"schema": "aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lerp_(Tensor & self, const Tensor & end, const Tensor & weight); // {"schema": "aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & addbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, int64_t from, c10::optional to, c10::optional generator); // {"schema": "aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, int64_t to, c10::optional generator); // {"schema": "aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, c10::optional generator); // {"schema": "aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & uniform_(Tensor & self, double from, double to, c10::optional generator); // {"schema": "aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & cauchy_(Tensor & self, double median, double sigma, c10::optional generator); // {"schema": "aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? 
generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & log_normal_(Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & exponential_(Tensor & self, double lambd, c10::optional generator); // {"schema": "aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & geometric_(Tensor & self, double p, c10::optional generator); // {"schema": "aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & diag_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor diag(const Tensor & self, int64_t diagonal); // {"schema": "aten::diag(Tensor self, int diagonal=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cross_out(const Tensor & self, const Tensor & other, c10::optional dim, Tensor & out); // {"schema": "aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cross(const Tensor & self, const Tensor & other, c10::optional dim); // {"schema": "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & triu_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor triu(const Tensor & self, int64_t diagonal); // {"schema": "aten::triu(Tensor self, int diagonal=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tril_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tril(const Tensor & self, int64_t diagonal); // {"schema": "aten::tril(Tensor self, int diagonal=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor trace(const Tensor & self); // {"schema": "aten::trace(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor trace_backward(const Tensor & grad, c10::SymIntArrayRef sizes); // {"schema": "aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & ne_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ne(const Tensor & self, const Scalar & other); // {"schema": "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ne_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ne(const Tensor & self, const Tensor & other); // {"schema": "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ne_(Tensor & self, const Scalar & other); // {"schema": "aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ne_(Tensor & self, const Tensor & other); // {"schema": "aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & not_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor not_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & not_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor not_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & not_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & not_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & eq_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor eq(const Tensor & self, const Scalar & other); // {"schema": "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & eq_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor eq(const Tensor & self, const Tensor & other); // {"schema": "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ge(const Tensor & self, const Scalar & other); // {"schema": "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ge(const Tensor & self, const Tensor & other); // {"schema": "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_(Tensor & self, const Scalar & other); // {"schema": "aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ge_(Tensor & self, const Tensor & other); // {"schema": "aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & greater_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & greater_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & le_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor le(const Tensor & self, const Scalar & other); // {"schema": "aten::le.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & le_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor le(const Tensor & self, const Tensor & other); // {"schema": "aten::le.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & le_(Tensor & self, const Scalar & other); // {"schema": "aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & le_(Tensor & self, const Tensor & other); // {"schema": "aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & less_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & less_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & gt_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gt(const Tensor & self, const Scalar & other); // {"schema": "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gt_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gt(const Tensor & self, const Tensor & other); // {"schema": "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gt_(Tensor & self, const Scalar & other); // {"schema": "aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & gt_(Tensor & self, const Tensor & other); // {"schema": "aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & greater_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater(const Tensor & self, const Scalar & other); // {"schema": "aten::greater.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater(const Tensor & self, const Tensor & other); // {"schema": "aten::greater.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_(Tensor & self, const Scalar & other); // {"schema": "aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & greater_(Tensor & self, const Tensor & other); // {"schema": "aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & lt_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lt(const Tensor & self, const Scalar & other); // {"schema": "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lt_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lt(const Tensor & self, const Tensor & other); // {"schema": "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lt_(Tensor & self, const Scalar & other); // {"schema": "aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lt_(Tensor & self, const Tensor & other); // {"schema": "aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & less_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less(const Tensor & self, const Scalar & other); // {"schema": "aten::less.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less(const Tensor & self, const Tensor & other); // {"schema": "aten::less.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_(Tensor & self, const Scalar & other); // {"schema": "aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & less_(Tensor & self, const Tensor & other); // {"schema": "aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & take_out(const Tensor & self, const Tensor & index, Tensor & out); // {"schema": "aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor take(const Tensor & self, const Tensor & index); // {"schema": "aten::take(Tensor self, Tensor index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & take_along_dim_out(const Tensor & self, const Tensor & indices, c10::optional dim, Tensor & out); // {"schema": "aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor take_along_dim(const Tensor & self, const Tensor & indices, c10::optional dim); // {"schema": "aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & index_select_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & out); // {"schema": "aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index); // {"schema": "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & index_select_out(const Tensor & self, Dimname dim, const Tensor & index, Tensor & out); // {"schema": "aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor index_select(const Tensor & self, Dimname dim, const Tensor & index); // {"schema": "aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor", "dispatch": "False", "default": "True"} +Tensor index_select_backward(const Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const Tensor & index); // {"schema": "aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & masked_select_out(const Tensor & self, const Tensor & mask, Tensor & out); // {"schema": "aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_select(const Tensor & self, const Tensor & mask); // {"schema": "aten::masked_select(Tensor self, Tensor mask) -> Tensor", "dispatch": "True", "default": "False"} +Tensor masked_select_backward(const Tensor & grad, const Tensor & input, const Tensor & mask); // {"schema": "aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nonzero_out(const Tensor & self, Tensor & out); // {"schema": "aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nonzero(const Tensor & self); // {"schema": "aten::nonzero(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & nonzero_static_out(const Tensor & self, int64_t size, int64_t fill_value, Tensor & out); // {"schema": "aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nonzero_static(const Tensor & self, int64_t size, int64_t fill_value); // {"schema": "aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector nonzero_numpy(const Tensor & self); // {"schema": "aten::nonzero_numpy(Tensor self) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor argwhere(const Tensor & self); // {"schema": "aten::argwhere(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & gather_out(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad, Tensor & out); // {"schema": "aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor gather_backward(const Tensor & grad, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & gather_out(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad, Tensor & out); // {"schema": "aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor gather(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _gather_sparse_backward(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & grad); // {"schema": "aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & addcmul_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value, Tensor & out); // {"schema": "aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addcdiv_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value, Tensor & out); // {"schema": "aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor cross_entropy_loss(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing); // {"schema": "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple triangular_solve_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular, Tensor & X, Tensor & M); // {"schema": "aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) 
cloned_coefficient)", "dispatch": "True", "default": "False"} +::std::tuple triangular_solve(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular); // {"schema": "aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)", "dispatch": "True", "default": "True"} +void _linalg_check_errors(const Tensor & info, c10::string_view api_name, bool is_matrix); // {"schema": "aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()", "dispatch": "True", "default": "True"} +Tensor & linalg_solve_triangular_out(const Tensor & self, const Tensor & B, bool upper, bool left, bool unitriangular, Tensor & out); // {"schema": "aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linalg_solve_triangular(const Tensor & self, const Tensor & B, bool upper, bool left, bool unitriangular); // {"schema": "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor linalg_vander(const Tensor & x, c10::optional N); // {"schema": "aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple svd_out(const Tensor & self, bool some, bool compute_uv, Tensor & U, Tensor & S, Tensor & V); // {"schema": "aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)", "dispatch": "False", "default": "True"} +::std::tuple svd(const Tensor & self, bool some, bool compute_uv); // {"schema": "aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)", "dispatch": "False", "default": "True"} +Tensor swapaxes(const Tensor & self, int64_t axis0, int64_t axis1); // {"schema": "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor & swapaxes_(Tensor & self, int64_t axis0, int64_t axis1); // {"schema": "aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor swapdims(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor & swapdims_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cholesky_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cholesky(const Tensor & self, bool upper); // {"schema": "aten::cholesky(Tensor self, bool upper=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & cholesky_solve_out(const Tensor & self, const Tensor & input2, bool upper, Tensor & out); // {"schema": "aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor cholesky_solve(const Tensor & self, const Tensor & input2, bool upper); // {"schema": "aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper); // {"schema": "aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cholesky_inverse(const Tensor & self, bool upper); // {"schema": "aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & cholesky_inverse_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple qr_out(const Tensor & self, bool some, Tensor & Q, Tensor & R); // {"schema": "aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)", "dispatch": "False", "default": "True"} +::std::tuple qr(const Tensor & self, bool some); // {"schema": "aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)", "dispatch": "False", "default": "True"} +::std::tuple geqrf_out(const Tensor & self, Tensor & a, Tensor & tau); // {"schema": "aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)", "dispatch": "True", "default": "False"} +::std::tuple geqrf(const Tensor & self); // {"schema": "aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)", "dispatch": "True", "default": "False"} +Tensor orgqr(const Tensor & self, const Tensor & input2); // {"schema": "aten::orgqr(Tensor self, Tensor input2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & orgqr_out(const Tensor & self, const Tensor & input2, Tensor & out); // {"schema": "aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose, Tensor & out); // {"schema": "aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose); // {"schema": "aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _lu_with_info(const Tensor & self, bool pivot, bool check_errors); // {"schema": "aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)", "dispatch": "False", "default": "True"} +Tensor & lu_solve_out(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots, Tensor & out); // {"schema": "aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor lu_solve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots); // {"schema": "aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple lu_unpack(const Tensor & LU_data, const Tensor & LU_pivots, bool unpack_data, bool unpack_pivots); // {"schema": "aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)", "dispatch": "True", "default": "True"} +::std::tuple lu_unpack_out(const Tensor & LU_data, const Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, Tensor & P, Tensor & L, Tensor & U); // {"schema": "aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)", "dispatch": "True", "default": "False"} +Tensor & multinomial_out(const Tensor & self, int64_t num_samples, bool replacement, c10::optional generator, Tensor & out); // {"schema": "aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement, c10::optional generator); // {"schema": "aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & lgamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & lgamma_(Tensor & self); // {"schema": "aten::lgamma_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor lgamma(const Tensor & self); // {"schema": "aten::lgamma(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & digamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor digamma(const Tensor & self); // {"schema": "aten::digamma(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polygamma_out(int64_t n, const Tensor & self, Tensor & out); // {"schema": "aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor polygamma(int64_t n, const Tensor & self); // {"schema": "aten::polygamma(int n, Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polygamma_(Tensor & self, int64_t n); // {"schema": "aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor erfinv(const Tensor & self); // {"schema": "aten::erfinv(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erfinv_(Tensor & self); // {"schema": "aten::erfinv_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erfinv_out(const Tensor & self, Tensor & out); // {"schema": "aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor i0(const Tensor & self); // {"schema": "aten::i0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & i0_(Tensor & self); // {"schema": "aten::i0_(Tensor(a!) 
self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sign(const Tensor & self); // {"schema": "aten::sign(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sign_(Tensor & self); // {"schema": "aten::sign_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sign_out(const Tensor & self, Tensor & out); // {"schema": "aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor signbit(const Tensor & self); // {"schema": "aten::signbit(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & signbit_out(const Tensor & self, Tensor & out); // {"schema": "aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor dist(const Tensor & self, const Tensor & other, const Scalar & p); // {"schema": "aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atan2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & atan2_(Tensor & self, const Tensor & other); // {"schema": "aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor atan2(const Tensor & self, const Tensor & other); // {"schema": "aten::atan2(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arctan2(const Tensor & self, const Tensor & other); // {"schema": "aten::arctan2(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctan2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctan2_(Tensor & self, const Tensor & other); // {"schema": "aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & lerp_out(const Tensor & self, const Tensor & end, const Scalar & weight, Tensor & out); // {"schema": "aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & lerp_out(const Tensor & self, const Tensor & end, const Tensor & weight, Tensor & out); // {"schema": "aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lerp(const Tensor & self, const Tensor & end, const Scalar & weight); // {"schema": "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor lerp(const Tensor & self, const Tensor & end, const Tensor & weight); // {"schema": "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & histc_out(const Tensor & self, int64_t bins, const Scalar & min, const Scalar & max, Tensor & out); // {"schema": "aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor histc(const Tensor & self, int64_t bins, const Scalar & min, const Scalar & max); // {"schema": "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple histogram_out(const Tensor & self, const Tensor & bins, const c10::optional & weight, bool density, Tensor & hist, Tensor & bin_edges); // {"schema": "aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram(const Tensor & self, const Tensor & bins, const c10::optional & weight, bool density); // {"schema": "aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram_out(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density, Tensor & hist, Tensor & bin_edges); // {"schema": "aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)", "dispatch": "True", "default": "False"} +::std::vector _histogramdd_bin_edges(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]", "dispatch": "True", "default": "False"} +Tensor _histogramdd_from_bin_cts(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _histogramdd_from_bin_tensors(const Tensor & self, TensorList bins, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple> histogramdd(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +::std::tuple> histogramdd(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +::std::tuple> histogramdd(const Tensor & self, TensorList bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +Tensor & fmod_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fmod(const Tensor & self, const Scalar & other); // {"schema": "aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmod_(Tensor & self, const Scalar & other); // {"schema": "aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fmod_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fmod(const Tensor & self, const Tensor & other); // {"schema": "aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmod_(Tensor & self, const Tensor & other); // {"schema": "aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hypot_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hypot(const Tensor & self, const Tensor & other); // {"schema": "aten::hypot(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hypot_(Tensor & self, const Tensor & other); // {"schema": "aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & igamma_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor igamma(const Tensor & self, const Tensor & other); // {"schema": "aten::igamma(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & igamma_(Tensor & self, const Tensor & other); // {"schema": "aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & igammac_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor igammac(const Tensor & self, const Tensor & other); // {"schema": "aten::igammac(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & igammac_(Tensor & self, const Tensor & other); // {"schema": "aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nextafter_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nextafter(const Tensor & self, const Tensor & other); // {"schema": "aten::nextafter(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nextafter_(Tensor & self, const Tensor & other); // {"schema": "aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & remainder_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor remainder(const Tensor & self, const Scalar & other); // {"schema": "aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & remainder_(Tensor & self, const Scalar & other); // {"schema": "aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & remainder_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor remainder(const Tensor & self, const Tensor & other); // {"schema": "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & remainder_(Tensor & self, const Tensor & other); // {"schema": "aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor remainder(const Scalar & self, const Tensor & other); // {"schema": "aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor min(const Tensor & self); // {"schema": "aten::min(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & min_out(const Tensor & self, Tensor & out); // {"schema": "aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fmin(const Tensor & self, const Tensor & other); // {"schema": "aten::fmin(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmin_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max(const Tensor & self); // {"schema": "aten::max(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor fmax(const Tensor & self, const Tensor & other); // {"schema": "aten::fmax(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmax_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor maximum(const Tensor & self, const Tensor & other); // {"schema": "aten::maximum(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & maximum_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max(const Tensor & self, const Tensor & other); // {"schema": "aten::max.other(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & max_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & max_out(const Tensor & self, Tensor & out); // {"schema": "aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor minimum(const Tensor & self, const Tensor & other); // {"schema": "aten::minimum(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & minimum_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & min_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor min(const Tensor & self, const Tensor & other); // {"schema": "aten::min.other(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantile(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & quantile_out(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor quantile(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & quantile_out(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanquantile(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanquantile_out(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanquantile(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::nanquantile.scalar(Tensor self, float q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanquantile_out(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple sort_out(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple sort_out(const Tensor & self, c10::optional stable, int64_t dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple sort(const Tensor & self, int64_t dim, bool descending); // {"schema": "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple sort(const Tensor & self, c10::optional stable, int64_t dim, bool descending); // {"schema": "aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple sort_out(const Tensor & self, Dimname dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple sort_out(const Tensor & self, c10::optional stable, Dimname dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple sort(const Tensor & self, Dimname dim, bool descending); // {"schema": "aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple sort(const Tensor & self, c10::optional stable, Dimname dim, bool descending); // {"schema": "aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +Tensor & msort_out(const Tensor & self, Tensor & out); // {"schema": "aten::msort.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor msort(const Tensor & self); // {"schema": "aten::msort(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argsort(const Tensor & self, int64_t dim, bool descending); // {"schema": "aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argsort(const Tensor & self, bool stable, int64_t dim, bool descending); // {"schema": "aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor argsort(const Tensor & self, Dimname dim, bool descending); // {"schema": "aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple topk_out(const Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted, Tensor & values, Tensor & indices); // {"schema": "aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple topk(const Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted); // {"schema": "aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +Tensor all(const Tensor & self); // {"schema": "aten::all(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & all_out(const Tensor & self, Tensor & out); // {"schema": "aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor any(const Tensor & self); // {"schema": "aten::any(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & any_out(const Tensor & self, Tensor & out); // {"schema": "aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & renorm_out(const Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm, Tensor & out); // {"schema": "aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor renorm(const Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm); // {"schema": "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & renorm_(Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm); // {"schema": "aten::renorm_(Tensor(a!) 
self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step); // {"schema": "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor unfold_backward(const Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step); // {"schema": "aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor", "dispatch": "True", "default": "False"} +bool equal(const Tensor & self, const Tensor & other); // {"schema": "aten::equal(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "False"} +Tensor & pow_out(const Tensor & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor pow(const Tensor & self, const Tensor & exponent); // {"schema": "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & pow_out(const Scalar & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor pow(const Scalar & self, const Tensor & exponent); // {"schema": "aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & pow_out(const Tensor & self, const Scalar & exponent, Tensor & out); // {"schema": "aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor pow(const Tensor & self, const Scalar & exponent); // {"schema": "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & pow_(Tensor & self, const Scalar & exponent); // {"schema": "aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & pow_(Tensor & self, const Tensor & exponent); // {"schema": "aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & float_power_out(const Tensor & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor float_power(const Tensor & self, const Tensor & exponent); // {"schema": "aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & float_power_out(const Scalar & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor float_power(const Scalar & self, const Tensor & exponent); // {"schema": "aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & float_power_out(const Tensor & self, const Scalar & exponent, Tensor & out); // {"schema": "aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor float_power(const Tensor & self, const Scalar & exponent); // {"schema": "aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & float_power_(Tensor & self, const Scalar & exponent); // {"schema": "aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & float_power_(Tensor & self, const Tensor & exponent); // {"schema": "aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & normal_(Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal_functional(const Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & normal_out(const Tensor & mean, double std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(const Tensor & mean, double std, c10::optional generator); // {"schema": "aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & normal_out(double mean, const Tensor & std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(double mean, const Tensor & std, c10::optional generator); // {"schema": "aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & normal_out(const Tensor & mean, const Tensor & std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(const Tensor & mean, const Tensor & std, c10::optional generator); // {"schema": "aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor normal(double mean, double std, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & normal_out(double mean, double std, c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor alias(const Tensor & self); // {"schema": "aten::alias(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +void _amp_foreach_non_finite_check_and_unscale_(TensorList self, Tensor & found_inf, const Tensor & inv_scale); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()", "dispatch": "True", "default": "False"} +Tensor & _amp_update_scale_(Tensor & self, Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); // {"schema": "aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sub_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sub_(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": 
"True", "default": "False"} +void _foreach_sub_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, TensorList other); // {"schema": "aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, TensorList other); // {"schema": "aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, const Tensor & other); // {"schema": "aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, const Tensor & other); // {"schema": "aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, TensorList other); // {"schema": "aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, TensorList other); // {"schema": "aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, const Tensor & other); // {"schema": "aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, const Tensor & other); // {"schema": "aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_max(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_max_(TensorList self, const Scalar & scalar); // {"schema": 
"aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_max(TensorList self, TensorList other); // {"schema": "aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_max_(TensorList self, TensorList other); // {"schema": "aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_max(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_max_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_min(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_min_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_min(TensorList self, TensorList other); // {"schema": "aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_min_(TensorList self, TensorList other); // {"schema": "aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_min(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_min_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_maximum(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_maximum_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_maximum(TensorList self, TensorList other); // {"schema": "aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_maximum_(TensorList self, TensorList other); // {"schema": "aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_maximum(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_maximum_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_minimum(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_minimum_(TensorList self, const Scalar & 
scalar); // {"schema": "aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_minimum(TensorList self, TensorList other); // {"schema": "aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_minimum_(TensorList self, TensorList other); // {"schema": "aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_minimum(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_minimum_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcdiv(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcdiv(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcdiv(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars); // {"schema": "aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_addcdiv_(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcdiv_(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcdiv_(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars); // {"schema": "aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcmul(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcmul(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcmul(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars); // {"schema": "aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_addcmul_(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": 
"aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcmul_(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcmul_(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars); // {"schema": "aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_abs(TensorList self); // {"schema": "aten::_foreach_abs(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_abs_(TensorList self); // {"schema": "aten::_foreach_abs_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_acos(TensorList self); // {"schema": "aten::_foreach_acos(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_acos_(TensorList self); // {"schema": "aten::_foreach_acos_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_asin(TensorList self); // {"schema": "aten::_foreach_asin(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_asin_(TensorList self); // {"schema": "aten::_foreach_asin_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_atan(TensorList self); // {"schema": "aten::_foreach_atan(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_atan_(TensorList self); // {"schema": "aten::_foreach_atan_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_ceil(TensorList self); // {"schema": "aten::_foreach_ceil(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_ceil_(TensorList self); // {"schema": "aten::_foreach_ceil_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_cos(TensorList self); // {"schema": "aten::_foreach_cos(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_cos_(TensorList self); // {"schema": "aten::_foreach_cos_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_cosh(TensorList self); // {"schema": "aten::_foreach_cosh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_cosh_(TensorList self); // {"schema": "aten::_foreach_cosh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_erf(TensorList self); // {"schema": "aten::_foreach_erf(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_erf_(TensorList self); // {"schema": "aten::_foreach_erf_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_erfc(TensorList self); // {"schema": "aten::_foreach_erfc(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_erfc_(TensorList self); // {"schema": "aten::_foreach_erfc_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_exp(TensorList self); // {"schema": "aten::_foreach_exp(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_exp_(TensorList self); // {"schema": "aten::_foreach_exp_(Tensor(a!)[] 
self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_expm1(TensorList self); // {"schema": "aten::_foreach_expm1(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_expm1_(TensorList self); // {"schema": "aten::_foreach_expm1_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_floor(TensorList self); // {"schema": "aten::_foreach_floor(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_floor_(TensorList self); // {"schema": "aten::_foreach_floor_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_frac(TensorList self); // {"schema": "aten::_foreach_frac(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_frac_(TensorList self); // {"schema": "aten::_foreach_frac_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_lerp(TensorList self, TensorList tensors1, TensorList weights); // {"schema": "aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_lerp_(TensorList self, TensorList tensors1, TensorList weights); // {"schema": "aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_lerp(TensorList self, TensorList tensors1, const Scalar & weight); // {"schema": "aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_lerp_(TensorList self, TensorList tensors1, const Scalar & weight); // {"schema": "aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_lgamma(TensorList self); // {"schema": "aten::_foreach_lgamma(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_lgamma_(TensorList self); // {"schema": "aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log(TensorList self); // {"schema": "aten::_foreach_log(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log_(TensorList self); // {"schema": "aten::_foreach_log_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log10(TensorList self); // {"schema": "aten::_foreach_log10(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log10_(TensorList self); // {"schema": "aten::_foreach_log10_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log1p(TensorList self); // {"schema": "aten::_foreach_log1p(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log1p_(TensorList self); // {"schema": "aten::_foreach_log1p_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log2(TensorList self); // {"schema": "aten::_foreach_log2(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log2_(TensorList self); // {"schema": "aten::_foreach_log2_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_neg(TensorList self); // {"schema": "aten::_foreach_neg(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_neg_(TensorList self); // {"schema": 
"aten::_foreach_neg_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_norm(TensorList self, const Scalar & ord); // {"schema": "aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_pow(TensorList self, TensorList exponent); // {"schema": "aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_pow(TensorList self, const Scalar & exponent); // {"schema": "aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_pow(TensorList self, ArrayRef exponent); // {"schema": "aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_pow(const Scalar & self, TensorList exponent); // {"schema": "aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_pow_(TensorList self, TensorList exponent); // {"schema": "aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()", "dispatch": "True", "default": "False"} +void _foreach_pow_(TensorList self, const Scalar & exponent); // {"schema": "aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()", "dispatch": "True", "default": "False"} +void _foreach_pow_(TensorList self, ArrayRef exponent); // {"schema": "aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_reciprocal(TensorList self); // {"schema": "aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_reciprocal_(TensorList self); // {"schema": "aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_round(TensorList self); // {"schema": "aten::_foreach_round(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_round_(TensorList self); // {"schema": "aten::_foreach_round_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sigmoid(TensorList self); // {"schema": "aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sigmoid_(TensorList self); // {"schema": "aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sign(TensorList self); // {"schema": "aten::_foreach_sign(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sign_(TensorList self); // {"schema": "aten::_foreach_sign_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sin(TensorList self); // {"schema": "aten::_foreach_sin(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sin_(TensorList self); // {"schema": "aten::_foreach_sin_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sinh(TensorList self); // {"schema": "aten::_foreach_sinh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sinh_(TensorList self); // {"schema": "aten::_foreach_sinh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sqrt(TensorList self); // {"schema": "aten::_foreach_sqrt(Tensor[] self) -> Tensor[]", 
"dispatch": "True", "default": "False"} +void _foreach_sqrt_(TensorList self); // {"schema": "aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_tan(TensorList self); // {"schema": "aten::_foreach_tan(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_tan_(TensorList self); // {"schema": "aten::_foreach_tan_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_tanh(TensorList self); // {"schema": "aten::_foreach_tanh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_tanh_(TensorList self); // {"schema": "aten::_foreach_tanh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_trunc(TensorList self); // {"schema": "aten::_foreach_trunc(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_trunc_(TensorList self); // {"schema": "aten::_foreach_trunc_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +void _foreach_zero_(TensorList self); // {"schema": "aten::_foreach_zero_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +void _foreach_copy_(TensorList self, TensorList src, bool non_blocking); // {"schema": "aten::_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> ()", "dispatch": "True", "default": "False"} +Tensor bucketize(const Tensor & self, const Tensor & boundaries, bool out_int32, bool right); // {"schema": "aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & bucketize_out(const Tensor & self, const Tensor & boundaries, bool out_int32, bool right, Tensor & out); // {"schema": "aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bucketize(const Scalar & self, const Tensor & boundaries, bool out_int32, bool right); // {"schema": "aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor searchsorted(const Tensor & sorted_sequence, const Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); // {"schema": "aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & searchsorted_out(const Tensor & sorted_sequence, const Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, Tensor & out); // {"schema": "aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor searchsorted(const Tensor & sorted_sequence, const Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); // {"schema": "aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & searchsorted_out(const Tensor & sorted_sequence, const Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, Tensor & out); // {"schema": "aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _convert_indices_from_coo_to_csr(const Tensor & self, int64_t size, bool out_int32); // {"schema": "aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _convert_indices_from_coo_to_csr_out(const Tensor & self, int64_t size, bool out_int32, Tensor & out); // {"schema": "aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _convert_indices_from_csr_to_coo(const Tensor & crow_indices, const Tensor & col_indices, bool out_int32, bool transpose); // {"schema": "aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _convert_indices_from_csr_to_coo_out(const Tensor & crow_indices, const Tensor & col_indices, bool out_int32, bool transpose, Tensor & out); // {"schema": "aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & mse_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mse_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input); // {"schema": "aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", "dispatch": "True", "default": "False"} +Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multi_margin_loss_out(const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction, Tensor & out); // {"schema": "aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multi_margin_loss(const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction); // {"schema": "aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & multi_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction, Tensor & grad_input); // {"schema": "aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction); // {"schema": "aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & multilabel_margin_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple multilabel_margin_loss_forward_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & output, Tensor & is_target); // {"schema": "aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)", "dispatch": "True", "default": "False"} +Tensor & multilabel_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target, Tensor & grad_input); // {"schema": "aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target); // {"schema": "aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & nll_loss_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, Tensor & out); // {"schema": "aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nll_loss_nd(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"} +Tensor nll_loss(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple nll_loss_forward_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, Tensor & output, Tensor & total_weight); // {"schema": "aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple nll_loss_forward(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)", "dispatch": "True", "default": "True"} +Tensor & nll_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const Tensor & total_weight, Tensor & grad_input); // {"schema": "aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const Tensor & total_weight); // {"schema": "aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nll_loss2d_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, Tensor & out); // {"schema": "aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nll_loss2d(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple nll_loss2d_forward_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, Tensor & output, Tensor & total_weight); // {"schema": "aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple nll_loss2d_forward(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)", "dispatch": "True", "default": "False"} +Tensor & nll_loss2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const Tensor & total_weight, Tensor & grad_input); // {"schema": "aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const Tensor & total_weight); // {"schema": "aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & smooth_l1_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, double beta, Tensor & out); // {"schema": "aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction, double beta); // {"schema": "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & smooth_l1_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double beta, Tensor & grad_input); // {"schema": "aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double beta); // {"schema": "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & huber_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, double delta, Tensor & out); // {"schema": "aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor huber_loss(const Tensor & self, const Tensor & target, int64_t reduction, double delta); // {"schema": "aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & huber_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double delta, Tensor & grad_input); // {"schema": "aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) 
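// smooth_l1_loss and huber_loss above are closely related: both are quadratic for small
// errors and linear for large ones, and they coincide when beta == delta == 1. A minimal
// sketch, assuming <ATen/ATen.h>; at::Reduction::Mean is the schema default and the
// delta/beta values are illustrative.
#include <ATen/ATen.h>

inline void example_regression_losses() {
  at::Tensor pred = at::randn({3, 3});
  at::Tensor target = at::randn({3, 3});
  at::Tensor mse = at::mse_loss(pred, target);
  at::Tensor sl1 = at::smooth_l1_loss(pred, target, at::Reduction::Mean, /*beta=*/1.0);
  at::Tensor hub = at::huber_loss(pred, target, at::Reduction::Mean, /*delta=*/0.5);
  (void)mse; (void)sl1; (void)hub;
}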
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor huber_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double delta); // {"schema": "aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & soft_margin_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & soft_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input); // {"schema": "aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_out(const Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, Tensor & out); // {"schema": "aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor elu(const Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale); // {"schema": "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_backward_out(const Tensor & grad_output, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, bool is_result, const Tensor & self_or_result, Tensor & grad_input); // {"schema": "aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor elu_backward(const Tensor & grad_output, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, bool is_result, const Tensor & self_or_result); // {"schema": "aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_(Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale); // {"schema": "aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor glu(const Tensor & self, int64_t dim); // {"schema": "aten::glu(Tensor self, int dim=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & glu_backward_out(const Tensor & grad_output, const Tensor & self, int64_t dim, Tensor & grad_input); // {"schema": "aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim); // {"schema": "aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor glu_jvp(const Tensor & glu, const Tensor & x, const Tensor & dx, int64_t dim); // {"schema": "aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor glu_backward_jvp(const Tensor & grad_x, const Tensor & grad_glu, const Tensor & x, const Tensor & dgrad_glu, const Tensor & dx, int64_t dim); // {"schema": "aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardsigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardsigmoid(const Tensor & self); // {"schema": "aten::hardsigmoid(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardsigmoid_(Tensor & self); // {"schema": "aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hardsigmoid_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardsigmoid_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardtanh_out(const Tensor & self, const Scalar & min_val, const Scalar & max_val, Tensor & out); // {"schema": "aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardtanh(const Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardtanh_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & min_val, const Scalar & max_val, Tensor & grad_input); // {"schema": "aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardtanh_(Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh_(Tensor(a!) 
self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & hardswish_out(const Tensor & self, Tensor & out); // {"schema": "aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardswish(const Tensor & self); // {"schema": "aten::hardswish(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardswish_(Tensor & self); // {"schema": "aten::hardswish_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardswish_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & leaky_relu_out(const Tensor & self, const Scalar & negative_slope, Tensor & out); // {"schema": "aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor leaky_relu(const Tensor & self, const Scalar & negative_slope); // {"schema": "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & leaky_relu_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & negative_slope, bool self_is_result, Tensor & grad_input); // {"schema": "aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, const Scalar & negative_slope, bool self_is_result); // {"schema": "aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & leaky_relu_(Tensor & self, const Scalar & negative_slope); // {"schema": "aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log_sigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor log_sigmoid(const Tensor & self); // {"schema": "aten::log_sigmoid(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(const Tensor & self, Tensor & output, Tensor & buffer); // {"schema": "aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self); // {"schema": "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)", "dispatch": "True", "default": "False"} +Tensor & log_sigmoid_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & buffer, Tensor & grad_input); // {"schema": "aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!)
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer); // {"schema": "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & rrelu_with_noise_out(const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional<Generator> generator); // {"schema": "aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, bool self_is_result); // {"schema": "aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional<Generator> generator); // {"schema": "aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & softplus_out(const Tensor & self, const Scalar & beta, const Scalar & threshold, Tensor & out); // {"schema": "aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softplus(const Tensor & self, const Scalar & beta, const Scalar & threshold); // {"schema": "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softplus_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & beta, const Scalar & threshold, Tensor & grad_input); // {"schema": "aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, const Scalar & beta, const Scalar & threshold); // {"schema": "aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softshrink_out(const Tensor & self, const Scalar & lambd, Tensor & out); // {"schema": "aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softshrink(const Tensor & self, const Scalar & lambd); // {"schema": "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softshrink_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & lambd, Tensor & grad_input); // {"schema": "aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, const Scalar & lambd); // {"schema": "aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & adaptive_avg_pool2d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_avg_pool2d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", "dispatch": "False", "default": "True"} +Tensor mkldnn_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & mkldnn_adaptive_avg_pool2d_out(const Tensor & self, IntArrayRef output_size, Tensor & out); // {"schema": "aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mkldnn_adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool2d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & adaptive_avg_pool3d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_avg_pool3d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _adaptive_avg_pool3d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & adaptive_avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool2d_out(const Tensor & self, IntArrayRef output_size, Tensor & out, Tensor & indices); // {"schema": "aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & adaptive_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); // {"schema": "aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple adaptive_max_pool3d_out(const Tensor & self, IntArrayRef output_size, Tensor & out, Tensor & indices); // {"schema": "aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & adaptive_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); // {"schema": "aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & out); // {"schema": "aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & grad_input); // {"schema": "aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & out); // {"schema": "aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & grad_input); // {"schema": "aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple fractional_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples, Tensor & output, Tensor & indices); // {"schema": "aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple fractional_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); // {"schema": "aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & fractional_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); // {"schema": "aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple fractional_max_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples, Tensor & output, Tensor & indices); // {"schema": "aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); // {"schema": "aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & fractional_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); // {"schema": "aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple max_pool2d_with_indices_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out, Tensor & indices); // {"schema": "aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & max_pool2d_with_indices_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); // {"schema": "aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple max_pool3d_with_indices_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out, Tensor & indices); // {"schema": "aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & max_pool3d_with_indices_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); // {"schema": "aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & max_unpool2d_out(const Tensor & self, const Tensor & indices, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_unpool2d(const Tensor & self, const Tensor & indices, c10::SymIntArrayRef output_size); // {"schema": "aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & max_unpool3d_out(const Tensor & self, const Tensor & indices, c10::SymIntArrayRef output_size, IntArrayRef stride, IntArrayRef padding, Tensor & out); // {"schema": "aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_unpool3d(const Tensor & self, const Tensor & indices, c10::SymIntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); // {"schema": "aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & reflection_pad1d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad1d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reflection_pad1d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reflection_pad2d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad2d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & reflection_pad2d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & reflection_pad3d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad3d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reflection_pad3d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad3d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad1d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad1d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad1d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad2d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad2d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad2d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & replication_pad3d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad3d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad3d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _pad_circular(const Tensor & self, c10::SymIntArrayRef pad); // {"schema": "aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _pad_enum(const Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional value); // {"schema": "aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor pad(const Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional value); // {"schema": "aten::pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_linear1d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_bilinear2d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_bilinear2d_aa(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_trilinear3d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_bicubic2d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_bicubic2d_aa(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_nearest1d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_nearest_exact1d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_nearest2d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_nearest_exact2d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_nearest3d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_nearest_exact3d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & upsample_linear1d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales, Tensor & out); // {"schema": "aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_linear1d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales); // {"schema": "aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_linear1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales, Tensor & grad_input); // {"schema": "aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_linear1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales); // {"schema": "aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_bilinear2d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_bilinear2d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_bilinear2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_bilinear2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _upsample_bilinear2d_aa_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _upsample_bilinear2d_aa(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _upsample_bilinear2d_aa_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _upsample_bilinear2d_aa_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_bicubic2d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_bicubic2d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_bicubic2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_bicubic2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _upsample_bicubic2d_aa_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _upsample_bicubic2d_aa(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _upsample_bicubic2d_aa_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _upsample_bicubic2d_aa_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_trilinear3d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_trilinear3d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_trilinear3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_trilinear3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest1d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, Tensor & out); // {"schema": "aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact1d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, Tensor & out); // {"schema": "aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest1d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales); // {"schema": "aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact1d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales); // {"schema": "aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, Tensor & grad_input); // {"schema": "aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales); // {"schema": "aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales); // {"schema": "aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest2d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact2d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest2d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact2d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? 
scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest3d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact3d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest3d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact3d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sigmoid_backward_out(const Tensor & grad_output, const Tensor & output, Tensor & grad_input); // {"schema": "aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output); // {"schema": "aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logit_backward_out(const Tensor & grad_output, const Tensor & self, c10::optional eps, Tensor & grad_input); // {"schema": "aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logit_backward(const Tensor & grad_output, const Tensor & self, c10::optional eps); // {"schema": "aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tanh_backward_out(const Tensor & grad_output, const Tensor & output, Tensor & grad_input); // {"schema": "aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tanh_backward(const Tensor & grad_output, const Tensor & output); // {"schema": "aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & slow_conv_transpose2d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor slow_conv_transpose2d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation); // {"schema": "aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & slow_conv_transpose3d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor slow_conv_transpose3d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation); // {"schema": "aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & thnn_conv2d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); // {"schema": "aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & _slow_conv2d_forward_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & output); // {"schema": "aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _slow_conv2d_forward(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); // {"schema": "aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _slow_conv2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias); // {"schema": "aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "False"} +::std::tuple _slow_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask); // {"schema": "aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", "dispatch": "True", "default": "False"} +const Tensor & _conv_depthwise2d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, const Tensor & out); // {"schema": "aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _conv_depthwise2d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); // {"schema": "aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor", "dispatch": "True", "default": "False"} +Tensor conv_depthwise3d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); // {"schema": "aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & slow_conv3d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor slow_conv3d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); // {"schema": "aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & slow_conv3d_forward_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & output); // {"schema": "aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor slow_conv3d_forward(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); // {"schema": "aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias, SymInt[3] stride, SymInt[3] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor slow_conv_dilated2d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); // {"schema": "aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor slow_conv_dilated3d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); // {"schema": "aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & col2im_out(const Tensor & self, c10::SymIntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & out); // {"schema": "aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor col2im(const Tensor & self, c10::SymIntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); // {"schema": "aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", "dispatch": "True", "default": "False"} +Tensor column_stack(TensorList tensors); // {"schema": "aten::column_stack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & column_stack_out(TensorList tensors, Tensor & out); // {"schema": "aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & im2col_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & out); // {"schema": "aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); // {"schema": "aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", "dispatch": "True", "default": "False"} +Tensor isfinite(const Tensor & self); // {"schema": "aten::isfinite(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor isinf(const Tensor & self); // {"schema": "aten::isinf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +void record_stream(Tensor & self, Stream s); // {"schema": "aten::record_stream(Tensor(a!) self, Stream s) -> ()", "dispatch": "True", "default": "False"} +Tensor isposinf(const Tensor & self); // {"schema": "aten::isposinf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isposinf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isposinf.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isneginf(const Tensor & self); // {"schema": "aten::isneginf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isneginf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _add_batch_dim(const Tensor & self, int64_t batch_dim, int64_t level); // {"schema": "aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _remove_batch_dim(const Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim); // {"schema": "aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_entr(const Tensor & self); // {"schema": "aten::special_entr(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_entr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_ndtri(const Tensor & self); // {"schema": "aten::special_ndtri(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_ndtri_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_log_ndtr(const Tensor & self); // {"schema": "aten::special_log_ndtr(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_log_ndtr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_expm1(const Tensor & self); // {"schema": "aten::special_expm1(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_expm1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_exp2(const Tensor & self); // {"schema": "aten::special_exp2(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_exp2_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_psi(const Tensor & self); // {"schema": "aten::special_psi(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_psi_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_digamma(const Tensor & self); // {"schema": "aten::special_digamma(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_digamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_gammaln(const Tensor & self); // {"schema": "aten::special_gammaln(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_gammaln_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_gammaln.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_erf(const Tensor & self); // {"schema": "aten::special_erf(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_erf_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_erfc(const Tensor & self); // {"schema": "aten::special_erfc(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_erfc_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_erfcx(const Tensor & self); // {"schema": "aten::special_erfcx(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_erfcx_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_erfinv(const Tensor & self); // {"schema": "aten::special_erfinv(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_erfinv_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_ndtr(const Tensor & self); // {"schema": "aten::special_ndtr(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_ndtr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_xlog1py(const Tensor & self, const Tensor & other); // {"schema": "aten::special_xlog1py(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_xlog1py(const Scalar & self, const Tensor & other); // {"schema": "aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_xlog1py(const Tensor & self, const Scalar & other); // {"schema": "aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_xlog1py_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_xlog1py_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_xlog1py_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_xlogy(const Tensor & self, const Tensor & other); // {"schema": "aten::special_xlogy(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_xlogy(const Scalar & self, const Tensor & other); // {"schema": "aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_xlogy(const Tensor & self, const Scalar & other); // {"schema": "aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_xlogy_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_xlogy_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_xlogy_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_zeta(const Tensor & self, const Tensor & other); // {"schema": "aten::special_zeta(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_zeta(const Scalar & self, const Tensor & other); // {"schema": "aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_zeta(const Tensor & self, const Scalar & other); // {"schema": "aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_zeta_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_zeta_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_zeta_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_i0(const Tensor & self); // {"schema": "aten::special_i0(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_i0e(const Tensor & self); // {"schema": "aten::special_i0e(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_i0e_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_i1(const Tensor & self); // {"schema": "aten::special_i1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_i1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i1.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_i1e(const Tensor & self); // {"schema": "aten::special_i1e(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_i1e_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_logit(const Tensor & self, c10::optional eps); // {"schema": "aten::special_logit(Tensor self, float? eps=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_logit_out(const Tensor & self, c10::optional eps, Tensor & out); // {"schema": "aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_polygamma(int64_t n, const Tensor & self); // {"schema": "aten::special_polygamma(int n, Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_polygamma_out(int64_t n, const Tensor & self, Tensor & out); // {"schema": "aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_logsumexp_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_expit(const Tensor & self); // {"schema": "aten::special_expit(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_expit_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_sinc(const Tensor & self); // {"schema": "aten::special_sinc(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_sinc_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_round(const Tensor & self, int64_t decimals); // {"schema": "aten::special_round(Tensor self, *, int decimals=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_round_out(const Tensor & self, int64_t decimals, Tensor & out); // {"schema": "aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_log1p(const Tensor & self); // {"schema": "aten::special_log1p(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_log1p_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_gammainc_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_gammainc(const Tensor & self, const Tensor & other); // {"schema": "aten::special_gammainc(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_gammaincc_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_gammaincc(const Tensor & self, const Tensor & other); // {"schema": "aten::special_gammaincc(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_multigammaln(const Tensor & self, int64_t p); // {"schema": "aten::special_multigammaln(Tensor self, int p) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_multigammaln_out(const Tensor & self, int64_t p, Tensor & out); // {"schema": "aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fft_fft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? 
norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_hfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ihfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_hfft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_ihfft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_hfftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_ihfftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fftfreq(int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fft_fftfreq_out(int64_t n, double d, Tensor & out); // {"schema": "aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fft_rfftfreq(int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fft_rfftfreq_out(int64_t n, double d, Tensor & out); // {"schema": "aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fft_fftshift(const Tensor & self, OptionalIntArrayRef dim); // {"schema": "aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fft_ifftshift(const Tensor & self, OptionalIntArrayRef dim); // {"schema": "aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_cholesky_ex(const Tensor & self, bool upper, bool check_errors); // {"schema": "aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_cholesky_ex_out(const Tensor & self, bool upper, bool check_errors, Tensor & L, Tensor & info); // {"schema": "aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)", "dispatch": "True", "default": "False"} +Tensor linalg_cholesky(const Tensor & self, bool upper); // {"schema": "aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cholesky_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cross(const Tensor & self, const Tensor & other, int64_t dim); // {"schema": "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_cross_out(const Tensor & self, const Tensor & other, int64_t dim, Tensor & out); // {"schema": "aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lu_factor(const Tensor & A, bool pivot); // {"schema": "aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_lu_factor_out(const Tensor & A, bool pivot, Tensor & LU, Tensor & pivots); // {"schema": "aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_lu_factor_ex(const Tensor & A, bool pivot, bool check_errors); // {"schema": "aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lu_factor_ex_out(const Tensor & A, bool pivot, bool check_errors, Tensor & LU, Tensor & pivots, Tensor & info); // {"schema": "aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lu(const Tensor & A, bool pivot); // {"schema": "aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lu_out(const Tensor & A, bool pivot, Tensor & P, Tensor & L, Tensor & U); // {"schema": "aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)", "dispatch": "True", "default": "False"} +Tensor linalg_lu_solve(const Tensor & LU, const Tensor & pivots, const Tensor & B, bool left, bool adjoint); // {"schema": "aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_lu_solve_out(const Tensor & LU, const Tensor & pivots, const Tensor & B, bool left, bool adjoint, Tensor & out); // {"schema": "aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple _linalg_det(const Tensor & A); // {"schema": "aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_det_out(const Tensor & A, Tensor & result, Tensor & LU, Tensor & pivots); // {"schema": "aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)", "dispatch": "True", "default": "False"} +Tensor linalg_det(const Tensor & A); // {"schema": "aten::linalg_det(Tensor A) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_det_out(const Tensor & A, Tensor & out); // {"schema": "aten::linalg_det.out(Tensor A, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor det(const Tensor & self); // {"schema": "aten::det(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_ldl_factor_ex(const Tensor & self, bool hermitian, bool check_errors); // {"schema": "aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_ldl_factor_ex_out(const Tensor & self, bool hermitian, bool check_errors, Tensor & LD, Tensor & pivots, Tensor & info); // {"schema": "aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_ldl_factor(const Tensor & self, bool hermitian); // {"schema": "aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_ldl_factor_out(const Tensor & self, bool hermitian, Tensor & LD, Tensor & pivots); // {"schema": "aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)", "dispatch": "False", "default": "True"} +Tensor linalg_ldl_solve(const Tensor & LD, const Tensor & pivots, const Tensor & B, bool hermitian); // {"schema": "aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_ldl_solve_out(const Tensor & LD, const Tensor & pivots, const Tensor & B, bool hermitian, Tensor & out); // {"schema": "aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lstsq(const Tensor & self, const Tensor & b, c10::optional rcond, c10::optional driver); // {"schema": "aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lstsq_out(const Tensor & self, const Tensor & b, c10::optional rcond, c10::optional driver, Tensor & solution, Tensor & residuals, Tensor & rank, Tensor & singular_values); // {"schema": "aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)", "dispatch": "True", "default": "False"} +Tensor linalg_matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::linalg_matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_vecdot(const Tensor & x, const Tensor & y, int64_t dim); // {"schema": "aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_vecdot_out(const Tensor & x, const Tensor & y, int64_t dim, Tensor & out); // {"schema": "aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_exp(const Tensor & self); // {"schema": "aten::linalg_matrix_exp(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _linalg_slogdet(const Tensor & A); // {"schema": "aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_slogdet_out(const Tensor & A, Tensor & sign, Tensor & logabsdet, Tensor & LU, Tensor & pivots); // {"schema": "aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)", "dispatch": "True", "default": "False"} +::std::tuple linalg_slogdet(const Tensor & A); // {"schema": "aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple linalg_slogdet_out(const Tensor & A, Tensor & sign, Tensor & logabsdet); // {"schema": "aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple slogdet(const Tensor & self); // {"schema": "aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple slogdet_out(const Tensor & self, Tensor & sign, Tensor & logabsdet); // {"schema": "aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)", "dispatch": "False", "default": "True"} +Tensor logdet(const Tensor & self); // {"schema": "aten::logdet(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_eig(const Tensor & self); // {"schema": "aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "True", "default": "False"} +::std::tuple linalg_eig_out(const Tensor & self, Tensor & eigenvalues, Tensor & eigenvectors); // {"schema": "aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", "dispatch": "True", "default": "False"} +Tensor _linalg_eigvals(const Tensor & self); // {"schema": "aten::_linalg_eigvals(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor linalg_eigvals(const Tensor & self); // {"schema": "aten::linalg_eigvals(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_eigvals_out(const Tensor & self, Tensor & out); // {"schema": "aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple _linalg_eigh(const Tensor & A, c10::string_view UPLO, bool compute_v); // {"schema": "aten::_linalg_eigh(Tensor A, str UPLO=\"L\", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_eigh_out(const Tensor & A, c10::string_view UPLO, bool compute_v, Tensor & eigenvalues, Tensor & eigenvectors); // {"schema": "aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO=\"L\", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) 
eigenvectors)", "dispatch": "True", "default": "False"} +::std::tuple linalg_eigh(const Tensor & self, c10::string_view UPLO); // {"schema": "aten::linalg_eigh(Tensor self, str UPLO=\"L\") -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "False", "default": "True"} +::std::tuple linalg_eigh_out(const Tensor & self, c10::string_view UPLO, Tensor & eigvals, Tensor & eigvecs); // {"schema": "aten::linalg_eigh.eigvals(Tensor self, str UPLO=\"L\", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", "dispatch": "False", "default": "True"} +Tensor linalg_eigvalsh(const Tensor & self, c10::string_view UPLO); // {"schema": "aten::linalg_eigvalsh(Tensor self, str UPLO=\"L\") -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_eigvalsh_out(const Tensor & self, c10::string_view UPLO, Tensor & out); // {"schema": "aten::linalg_eigvalsh.out(Tensor self, str UPLO=\"L\", *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_householder_product(const Tensor & input, const Tensor & tau); // {"schema": "aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & linalg_householder_product_out(const Tensor & input, const Tensor & tau, Tensor & out); // {"schema": "aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_inv_ex(const Tensor & A, bool check_errors); // {"schema": "aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_inv_ex_out(const Tensor & A, bool check_errors, Tensor & inverse, Tensor & info); // {"schema": "aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)", "dispatch": "True", "default": "False"} +Tensor linalg_inv(const Tensor & A); // {"schema": "aten::linalg_inv(Tensor A) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_inv_out(const Tensor & A, Tensor & out); // {"schema": "aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor inverse(const Tensor & self); // {"schema": "aten::inverse(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & inverse_out(const Tensor & self, Tensor & out); // {"schema": "aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor inner(const Tensor & self, const Tensor & other); // {"schema": "aten::inner(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & inner_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor outer(const Tensor & self, const Tensor & vec2); // {"schema": "aten::outer(Tensor self, Tensor vec2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & outer_out(const Tensor & self, const Tensor & vec2, Tensor & out); // {"schema": "aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor ger(const Tensor & self, const Tensor & vec2); // {"schema": "aten::ger(Tensor self, Tensor vec2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & ger_out(const Tensor & self, const Tensor & vec2, Tensor & out); // {"schema": "aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_norm(const Tensor & self, const c10::optional & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor linalg_norm(const Tensor & self, c10::string_view ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_norm_out(const Tensor & self, const c10::optional & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & linalg_norm_out(const Tensor & self, c10::string_view ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_vector_norm(const Tensor & self, const Scalar & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_vector_norm_out(const Tensor & self, const Scalar & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linalg_matrix_norm(const Tensor & self, const Scalar & ord, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_norm_out(const Tensor & self, const Scalar & ord, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_norm(const Tensor & self, c10::string_view ord, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_norm_out(const Tensor & self, c10::string_view ord, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple _linalg_svd(const Tensor & A, bool full_matrices, bool compute_uv, c10::optional driver); // {"schema": "aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_svd_out(const Tensor & A, bool full_matrices, bool compute_uv, c10::optional driver, Tensor & U, Tensor & S, Tensor & Vh); // {"schema": "aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", "dispatch": "True", "default": "False"} +::std::tuple linalg_svd(const Tensor & A, bool full_matrices, c10::optional driver); // {"schema": "aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)", "dispatch": "False", "default": "True"} +::std::tuple linalg_svd_out(const Tensor & A, bool full_matrices, c10::optional driver, Tensor & U, Tensor & S, Tensor & Vh); // {"schema": "aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", "dispatch": "False", "default": "True"} +Tensor linalg_svdvals(const Tensor & A, c10::optional driver); // {"schema": "aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_svdvals_out(const Tensor & A, c10::optional driver, Tensor & out); // {"schema": "aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cond(const Tensor & self, const c10::optional & p); // {"schema": "aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cond_out(const Tensor & self, const c10::optional & p, Tensor & out); // {"schema": "aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cond(const Tensor & self, c10::string_view p); // {"schema": "aten::linalg_cond.p_str(Tensor self, str p) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cond_out(const Tensor & self, c10::string_view p, Tensor & out); // {"schema": "aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, const c10::optional & atol, const c10::optional & rtol, bool hermitian); // {"schema": "aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, const c10::optional & atol, const c10::optional & rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor linalg_pinv(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian); // {"schema": "aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, double rcond, bool hermitian); // {"schema": "aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, const Tensor & rcond, bool hermitian); // {"schema": "aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, double rcond, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, const Tensor & rcond, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple _linalg_solve_ex(const Tensor & A, const Tensor & B, bool left, bool check_errors); // {"schema": "aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_solve_ex_out(const Tensor & A, const Tensor & B, bool left, bool check_errors, Tensor & result, Tensor & LU, Tensor & pivots, Tensor & info); // {"schema": "aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_solve_ex(const Tensor & A, const Tensor & B, bool left, bool check_errors); // {"schema": "aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)", "dispatch": "False", "default": "True"} +::std::tuple linalg_solve_ex_out(const Tensor & A, const Tensor & B, bool left, bool check_errors, Tensor & result, Tensor & info); // {"schema": "aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)", "dispatch": "False", "default": "True"} +Tensor linalg_solve(const Tensor & A, const Tensor & B, bool left); // {"schema": "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_solve_out(const Tensor & A, const Tensor & B, bool left, Tensor & out); // {"schema": "aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_tensorinv(const Tensor & self, int64_t ind); // {"schema": "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_tensorinv_out(const Tensor & self, int64_t ind, Tensor & out); // {"schema": "aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_tensorsolve(const Tensor & self, const Tensor & other, OptionalIntArrayRef dims); // {"schema": "aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_tensorsolve_out(const Tensor & self, const Tensor & other, OptionalIntArrayRef dims, Tensor & out); // {"schema": "aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple linalg_qr(const Tensor & A, c10::string_view mode); // {"schema": "aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)", "dispatch": "True", "default": "True"} +::std::tuple linalg_qr_out(const Tensor & A, c10::string_view mode, Tensor & Q, Tensor & R); // {"schema": "aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)", "dispatch": "True", "default": "False"} +Tensor linalg_matrix_power(const Tensor & self, int64_t n); // {"schema": "aten::linalg_matrix_power(Tensor self, int n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_power_out(const Tensor & self, int64_t n, Tensor & out); // {"schema": "aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & input, const c10::optional & atol, const c10::optional & rtol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & input, const c10::optional & atol, const c10::optional & rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & self, double tol, bool hermitian); // {"schema": "aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & self, double tol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & input, const Tensor & tol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & input, const Tensor & tol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_multi_dot(TensorList tensors); // {"schema": "aten::linalg_multi_dot(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_multi_dot_out(TensorList tensors, Tensor & out); // {"schema": "aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nested_to_padded_tensor(const Tensor & self, double padding, OptionalIntArrayRef output_size); // {"schema": "aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_serialization_subcmul(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_parallel_materialize(const Tensor & self, int64_t num_parallel, bool skip_first); // {"schema": "aten::_test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_optional_intlist(const Tensor & values, OptionalIntArrayRef addends); // {"schema": "aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_optional_filled_intlist(const Tensor & values, OptionalIntArrayRef addends); // {"schema": "aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_optional_floatlist(const Tensor & values, c10::optional> addends); // {"schema": "aten::_test_optional_floatlist(Tensor values, float[]? 
addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_string_default(const Tensor & dummy, c10::string_view a, c10::string_view b); // {"schema": "aten::_test_string_default(Tensor dummy, str a=\"\\\"'\\\\\", str b='\"\\'\\\\') -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_ambiguous_defaults(const Tensor & dummy, int64_t a, int64_t b); // {"schema": "aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_ambiguous_defaults(const Tensor & dummy, int64_t a, c10::string_view b); // {"schema": "aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b=\"2\") -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_warn_in_autograd(const Tensor & self); // {"schema": "aten::_test_warn_in_autograd(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch(const Tensor & self, bool b); // {"schema": "aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch_view(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch_view_copy(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor segment_reduce(const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial); // {"schema": "aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _segment_reduce_backward(const Tensor & grad, const Tensor & output, const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial); // {"schema": "aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pad_sequence(TensorList sequences, bool batch_first, double padding_value); // {"schema": "aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor flatten_dense_tensors(TensorList tensors); // {"schema": "aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector unflatten_dense_tensors(const Tensor & flat, TensorList tensors); // {"schema": "aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor _nested_tensor_from_tensor_list(TensorList list, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _fw_primal_copy(const Tensor & self, int64_t level); // {"schema": "aten::_fw_primal_copy(Tensor self, int level) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _make_dual_copy(const Tensor & primal, const Tensor & tangent, int64_t level); // {"schema": "aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_as_real_copy(const Tensor & self); // {"schema": "aten::view_as_real_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_as_complex_copy(const Tensor & self); // {"schema": "aten::view_as_complex_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _conj_copy(const Tensor & self); // {"schema": "aten::_conj_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _neg_view_copy(const Tensor & self); // {"schema": "aten::_neg_view_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor as_strided_copy(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _sparse_broadcast_to_copy(const Tensor & self, IntArrayRef size); // {"schema": "aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diagonal_copy(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor expand_copy(const Tensor & self, c10::SymIntArrayRef size, bool implicit); // {"schema": "aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor permute_copy(const Tensor & self, IntArrayRef dims); // {"schema": "aten::permute_copy(Tensor self, int[] dims) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _reshape_alias_copy(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor", "dispatch": "True", "default": "True"} +Tensor select_copy(const Tensor & self, int64_t dim, c10::SymInt index); // {"schema": "aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor", "dispatch": "True", "default": "True"} +Tensor detach_copy(const Tensor & self); // {"schema": "aten::detach_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor slice_copy(const Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); // {"schema": "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor", "dispatch": "True", "default": "True"} +::std::vector split_copy(const Tensor & self, c10::SymInt split_size, int64_t dim); // {"schema": "aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector split_with_sizes_copy(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim); // {"schema": "aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +Tensor squeeze_copy(const Tensor & self); // {"schema": "aten::squeeze_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor squeeze_copy(const Tensor & self, int64_t dim); // {"schema": "aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor squeeze_copy(const Tensor & self, IntArrayRef dim); // {"schema": "aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor t_copy(const Tensor & self); // {"schema": "aten::t_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor transpose_copy(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor unsqueeze_copy(const Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _indices_copy(const Tensor & self); // {"schema": "aten::_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _values_copy(const Tensor & self); // {"schema": "aten::_values_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor indices_copy(const Tensor & self); // {"schema": "aten::indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor values_copy(const Tensor & self); // {"schema": "aten::values_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor crow_indices_copy(const Tensor & self); // {"schema": "aten::crow_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor col_indices_copy(const Tensor & self); // {"schema": "aten::col_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ccol_indices_copy(const Tensor & self); // {"schema": "aten::ccol_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor row_indices_copy(const Tensor & self); // {"schema": "aten::row_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +::std::vector unbind_copy(const Tensor & self, int64_t dim); // {"schema": "aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +void unbind_copy_out(const Tensor & self, int64_t dim, TensorList out); // {"schema": "aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void split_copy_out(const Tensor & self, c10::SymInt split_size, int64_t dim, TensorList out); // {"schema": "aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void split_with_sizes_copy_out(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, TensorList out); // {"schema": "aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", 
"dispatch": "True", "default": "True"} +Tensor view_copy(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::view_copy(Tensor self, SymInt[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_copy(const Tensor & self, ScalarType dtype); // {"schema": "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor unfold_copy(const Tensor & self, int64_t dimension, int64_t size, int64_t step); // {"schema": "aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor", "dispatch": "True", "default": "True"} +Tensor alias_copy(const Tensor & self); // {"schema": "aten::alias_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor to_padded_tensor(const Tensor & self, double padding, OptionalSymIntArrayRef output_size); // {"schema": "aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_softmax_with_shape(const Tensor & self, const Tensor & query); // {"schema": "aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _transformer_encoder_layer_fwd(const Tensor & src, int64_t embed_dim, int64_t num_heads, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const Tensor & norm_weight_1, const Tensor & norm_bias_1, const Tensor & norm_weight_2, const Tensor & norm_bias_2, const Tensor & ffn_weight_1, const Tensor & ffn_bias_1, const Tensor & ffn_weight_2, const Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type); // {"schema": "aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _native_multi_head_attention(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask, bool need_weights, bool average_attn_weights, c10::optional mask_type); // {"schema": "aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor scaled_dot_product_attention(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool is_causal, c10::optional scale); // {"schema": "aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? 
scale=None) -> Tensor", "dispatch": "False", "default": "True"} +int64_t _fused_sdp_choice(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool is_causal, c10::optional scale); // {"schema": "aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> int", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_attention_math(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool is_causal, const c10::optional & dropout_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _scaled_dot_product_flash_attention(const Tensor & query, const Tensor & key, const Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_flash_attention_for_cpu(const Tensor & query, const Tensor & key, const Tensor & value, double dropout_p, bool is_causal, const c10::optional & attn_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_flash_attention_backward(const Tensor & grad_out, const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & out, const Tensor & logsumexp, const Tensor & cum_seq_q, const Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const Tensor & philox_seed, const Tensor & philox_offset, c10::optional scale); // {"schema": "aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_flash_attention_for_cpu_backward(const Tensor & grad_out, const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & out, const Tensor & logsumexp, double dropout_p, bool is_causal, const c10::optional & attn_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? 
scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_efficient_attention(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, c10::optional scale); // {"schema": "aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_efficient_attention_backward(const Tensor & grad_out_, const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & attn_bias, const Tensor & out, const Tensor & logsumexp, const Tensor & philox_seed, const Tensor & philox_offset, double dropout_p, ::std::array grad_input_mask, bool is_causal, c10::optional scale); // {"schema": "aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_cudnn_attention(const Tensor & query, const Tensor & key, const Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset)", "dispatch": "True", "default": "False"} +::std::tuple _flash_attention_forward(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & cum_seq_q, const c10::optional & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale); // {"schema": "aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)", "dispatch": "True", "default": "False"} +::std::tuple _flash_attention_backward(const Tensor & grad_out, const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & out, const Tensor & logsumexp, const Tensor & cum_seq_q, const Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const Tensor & philox_seed, const Tensor & philox_offset, c10::optional scale); // {"schema": "aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? 
scale=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _efficient_attention_forward(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & bias, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, c10::optional max_seqlen_q, c10::optional max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp, c10::optional scale, const c10::optional & causal_diagonal, const c10::optional & seqlen_k); // {"schema": "aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, int? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? causal_diagonal=None, Tensor? seqlen_k=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k)", "dispatch": "True", "default": "False"} +::std::tuple _efficient_attention_backward(const Tensor & grad_out_, const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & bias, const Tensor & out, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const Tensor & logsumexp, double dropout_p, const Tensor & philox_seed, const Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional scale, c10::optional num_splits_key); // {"schema": "aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _triton_scaled_dot_attention(const Tensor & q, const Tensor & k, const Tensor & v, double dropout_p); // {"schema": "aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fill_mem_eff_dropout_mask_(Tensor & self, double dropout_p, int64_t seed, int64_t offset); // {"schema": "aten::_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _triton_multi_head_attention(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask); // {"schema": "aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor special_airy_ai(const Tensor & x); // {"schema": "aten::special_airy_ai(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_airy_ai_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_airy_ai.out(Tensor x, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_j0(const Tensor & self); // {"schema": "aten::special_bessel_j0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_j0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_j1(const Tensor & self); // {"schema": "aten::special_bessel_j1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_j1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_y0(const Tensor & self); // {"schema": "aten::special_bessel_y0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_y0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_y1(const Tensor & self); // {"schema": "aten::special_bessel_y1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_y1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_chebyshev_polynomial_t(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_t(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_t(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_t_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_t_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_t_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_u_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_u_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_u_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_v_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_v_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_v_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_w_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_w_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_w_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_h(const Tensor & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_h(const Scalar & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_h(const Tensor & x, const Scalar & n); // {"schema": "aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_hermite_polynomial_h_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_hermite_polynomial_h_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_hermite_polynomial_h_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_he(const Tensor & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_he(const Scalar & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_he(const Tensor & x, const Scalar & n); // {"schema": "aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_hermite_polynomial_he_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_hermite_polynomial_he_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_hermite_polynomial_he_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_laguerre_polynomial_l(const Tensor & x, const Tensor & n); // {"schema": "aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_laguerre_polynomial_l(const Scalar & x, const Tensor & n); // {"schema": "aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_laguerre_polynomial_l(const Tensor & x, const Scalar & n); // {"schema": "aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_laguerre_polynomial_l_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_laguerre_polynomial_l_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_laguerre_polynomial_l_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_legendre_polynomial_p(const Tensor & x, const Tensor & n); // {"schema": "aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_legendre_polynomial_p(const Scalar & x, const Tensor & n); // {"schema": "aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_legendre_polynomial_p(const Tensor & x, const Scalar & n); // {"schema": "aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_legendre_polynomial_p_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_legendre_polynomial_p_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_legendre_polynomial_p_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_modified_bessel_i0(const Tensor & self); // {"schema": "aten::special_modified_bessel_i0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_i1(const Tensor & self); // {"schema": "aten::special_modified_bessel_i1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_i1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_k0(const Tensor & self); // {"schema": "aten::special_modified_bessel_k0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_k0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_k1(const Tensor & self); // {"schema": "aten::special_modified_bessel_k1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_k1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_scaled_modified_bessel_k0(const Tensor & x); // {"schema": "aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_scaled_modified_bessel_k0_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_scaled_modified_bessel_k1(const Tensor & x); // {"schema": "aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_scaled_modified_bessel_k1_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_shifted_chebyshev_polynomial_t(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_t(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_t(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_spherical_bessel_j0(const Tensor & x); // {"schema": "aten::special_spherical_bessel_j0(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_spherical_bessel_j0_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _foobar(const Tensor & self, bool arg1, bool arg2, bool arg3); // {"schema": "aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor", "dispatch": "True", "default": "False"} +void _fused_adam_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_adam_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_adamw_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_adamw_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_sgd_(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_sgd_(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, const Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _propagate_xla_data(const Tensor & input, const Tensor & output); // {"schema": "aten::_propagate_xla_data(Tensor input, Tensor output) -> ()", "dispatch": "False", "default": "True"} +Tensor & _new_zeros_with_same_feature_meta_out(const Tensor & self, const Tensor & other, int64_t self_num_batch_dims, Tensor & out); // {"schema": "aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _cudnn_ctc_loss_out(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, Tensor & out0, Tensor & out1); // {"schema": "aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _cudnn_rnn_flatten_weight_out(TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, Tensor & out); // {"schema": "aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _cudnn_rnn_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? 
cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"}
+void _cudnn_rnn_backward_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional<Tensor> & cx, const Tensor & output, const c10::optional<Tensor> & grad_output, const c10::optional<Tensor> & grad_hy, const c10::optional<Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<Tensor> & dropout_state, const Tensor & reserve, ::std::array<bool,4> output_mask, Tensor & out0, Tensor & out1, Tensor & out2, TensorList out3); // {"schema": "aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()", "dispatch": "True", "default": "True"}
+Tensor & _cudnn_init_dropout_state_out(double dropout, bool train, int64_t dropout_seed, Tensor & out); // {"schema": "aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> _fused_dropout_out(const Tensor & self, double p, c10::optional<Generator> generator, Tensor & out0, Tensor & out1); // {"schema": "aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
+Tensor & _masked_scale_out(const Tensor & self, const Tensor & mask, double scale, Tensor & out); // {"schema": "aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> native_dropout_out(const Tensor & input, double p, c10::optional<bool> train, Tensor & out0, Tensor & out1); // {"schema": "aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
+Tensor & native_dropout_backward_out(const Tensor & grad_output, const Tensor & mask, double scale, Tensor & out); // {"schema": "aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _conj_physical_out(const Tensor & self, Tensor & out); // {"schema": "aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _add_relu_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & add_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & affine_grid_generator_out(const Tensor & theta, c10::SymIntArrayRef size, bool align_corners, Tensor & out); // {"schema": "aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _test_functorch_fallback_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bartlett_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bartlett_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & quantized_batch_norm_out(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const Tensor & mean, const Tensor & var, double eps, double output_scale, int64_t output_zero_point, Tensor & out); // {"schema": "aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bernoulli_out(const Tensor & self, const Tensor & p, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor bernoulli(const Tensor & self, const Tensor & p, c10::optional<Generator> generator); // {"schema": "aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & bernoulli_out(const Tensor & self, double p, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & binary_cross_entropy_with_logits_out(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & pos_weight, int64_t reduction, Tensor & out); // {"schema": "aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bincount_out(const Tensor & self, const c10::optional<Tensor> & weights, int64_t minlength, Tensor & out); // {"schema": "aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & blackman_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & blackman_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & block_diag_out(TensorList tensors, Tensor & out); // {"schema": "aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & constant_pad_nd_out(const Tensor & self, c10::SymIntArrayRef pad, const Scalar & value, Tensor & out); // {"schema": "aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & convolution_out(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, Tensor & out); // {"schema": "aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &> convolution_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"}
+Tensor & convolution_overrideable_out(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, Tensor & out); // {"schema": "aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &> convolution_backward_overrideable_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _convolution_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, Tensor & out); // {"schema": "aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & conv_tbc_out(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad, Tensor & out); // {"schema": "aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & copy_out(const Tensor & self, const Tensor & src, bool non_blocking, Tensor & out); // {"schema": "aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _copy_from_out(const Tensor & self, const Tensor & dst, bool non_blocking, Tensor & out); // {"schema": "aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _copy_from_and_resize_out(const Tensor & self, const Tensor & dst, Tensor & out); // {"schema": "aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & count_nonzero_out(const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & count_nonzero_out(const Tensor & self, c10::optional dim, Tensor & out); // {"schema": "aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_affine_grid_generator_out(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, Tensor & out); // {"schema": "aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_affine_grid_generator_backward_out(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, Tensor & out); // {"schema": "aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple cudnn_batch_norm_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +::std::tuple cudnn_batch_norm_backward_out(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const Tensor & reserveSpace, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_transpose_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, Tensor & out); // {"schema": "aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mps_convolution_transpose_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mps_convolution_transpose_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_relu_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_add_relu_out(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_grid_sampler_out(const Tensor & self, const Tensor & grid, Tensor & out); // {"schema": "aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple cudnn_grid_sampler_backward_out(const Tensor & self, const Tensor & grid, const Tensor & grad_output, Tensor & out0, Tensor & out1); // {"schema": "aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _ctc_loss_out(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity, Tensor & out0, Tensor & out1); // {"schema": "aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _ctc_loss_out(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, bool zero_infinity, Tensor & out0, Tensor & out1); // {"schema": "aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _ctc_loss_backward_out(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity, Tensor & out); // {"schema": "aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diag_embed_out(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Scalar & other, c10::optional rounding_mode, Tensor & out); // {"schema": "aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_out(const Tensor & weight, const Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, Tensor & out); // {"schema": "aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_dense_backward_out(const Tensor & grad_output, const Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, Tensor & out); // {"schema": "aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_renorm_out(const Tensor & self, const Tensor & indices, double max_norm, double norm_type, Tensor & out); // {"schema": "aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor embedding_renorm(const Tensor & self, const Tensor & indices, double max_norm, double norm_type); // {"schema": "aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple _embedding_bag_forward_only_out(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +::std::tuple _embedding_bag_out(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +Tensor & _embedding_bag_dense_backward_out(const Tensor & grad, const Tensor & indices, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx, Tensor & out); // {"schema": "aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _embedding_bag_per_sample_weights_backward_out(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode, int64_t padding_idx, Tensor & out); // {"schema": "aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_out(IntArrayRef size, c10::optional names, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_permuted_out(c10::SymIntArrayRef size, IntArrayRef physical_layout, Tensor & out); // {"schema": "aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_empty_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_empty_strided_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_full_out(const Tensor & self, c10::SymIntArrayRef size, const Scalar & fill_value, Tensor & out); // {"schema": "aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_zeros_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_ones_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _empty_affine_quantized_out(c10::SymIntArrayRef size, double scale, int64_t zero_point, c10::optional memory_format, Tensor & out); // {"schema": "aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _empty_per_channel_affine_quantized_out(c10::SymIntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, c10::optional memory_format, Tensor & out); // {"schema": "aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & resize_out(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format, const Tensor & out); // {"schema": "aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resize(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format); // {"schema": "aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +const Tensor & _resize_output_out(const Tensor & self, c10::SymIntArrayRef size, Device device, const Tensor & out); // {"schema": "aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _resize_output(const Tensor & self, c10::SymIntArrayRef size, Device device); // {"schema": "aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & empty_quantized_out(IntArrayRef size, const Tensor & qtensor, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_strided_out(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fill_out(const Tensor & self, const Scalar & value, Tensor & out); // {"schema": "aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fill_out(const Tensor & self, const Tensor & value, Tensor & out); // {"schema": "aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & floor_divide_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & full_out(IntArrayRef size, const Scalar & fill_value, c10::optional names, Tensor & out); // {"schema": "aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & full_like_out(const Tensor & self, const Scalar & fill_value, c10::optional memory_format, Tensor & out); // {"schema": "aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & from_file_out(c10::string_view filename, c10::optional shared, c10::optional size, Tensor & out); // {"schema": "aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & grid_sampler_2d_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple grid_sampler_2d_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _grid_sampler_2d_cpu_fallback_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & grid_sampler_3d_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple grid_sampler_3d_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & hann_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hann_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, double alpha, Tensor & out); // {"schema": "aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, double alpha, double beta, Tensor & out); // {"schema": "aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, bool periodic, double beta, Tensor & out); // {"schema": "aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_backward_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & index_put_out(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, Tensor & out); // {"schema": "aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _index_put_impl_out(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, bool unsafe, Tensor & out); // {"schema": "aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _index_put_impl(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, bool unsafe); // {"schema": "aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isnan_out(const Tensor & self, Tensor & out); // {"schema": "aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_out(const Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_backward_out(const Tensor & grad_out, const Tensor & input, c10::SymIntArrayRef normalized_shape, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple linear_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & mkldnn_linear_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, Tensor & out); // {"schema": "aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_linear_backward_input_out(IntArrayRef input_size, const Tensor & grad_output, const Tensor & weight, Tensor & out); // {"schema": "aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_linear_backward_weights_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, bool bias_defined, Tensor & out0, Tensor & out1); // {"schema": "aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_linear_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple matmul_backward_out(const Tensor & grad, const Tensor & self, const Tensor & other, ::std::array mask, Tensor & out0, Tensor & out1); // {"schema": "aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _aminmax_out(const Tensor & self, Tensor & out0, Tensor & out1); // {"schema": "aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _aminmax_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_max_pool1d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_max_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & median_out(const Tensor & self, Tensor & out); // {"schema": "aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nanmedian_out(const Tensor & self, Tensor & out); // {"schema": "aten::nanmedian.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mps_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mps_convolution_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & mkldnn_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_rnn_layer_out(const Tensor & input, const Tensor & weight0, const Tensor & weight1, const Tensor & weight2, const Tensor & weight3, const Tensor & hx_, const Tensor & cx_, bool reverse, IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_rnn_layer_backward_out(const Tensor & input, const Tensor & weight1, const Tensor & weight2, const Tensor & weight3, const Tensor & weight4, const Tensor & hx_, const Tensor & cx_tmp, const Tensor & output, const Tensor & hy_, const Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, IntArrayRef batch_sizes, bool batch_first, const Tensor & workspace, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4, Tensor & out5, Tensor & out6); // {"schema": "aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) 
out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))", "dispatch": "True", "default": "True"} +::std::tuple miopen_batch_norm_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple miopen_batch_norm_backward_out(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & miopen_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & miopen_convolution_transpose_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & miopen_depthwise_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple miopen_rnn_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"} +void miopen_rnn_backward_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2, TensorList out3); // {"schema": "aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()", "dispatch": "True", "default": "True"} +Tensor & _sparse_sparse_matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mul_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _native_batch_norm_legit_functional(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps); // {"schema": "aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)", "dispatch": "True", "default": "True"} +::std::tuple _native_batch_norm_legit_no_training_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_stats_out(const Tensor & input, double eps, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_gather_stats_out(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_gather_stats_with_counts_out(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const Tensor & counts, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple native_batch_norm_backward_out(const Tensor & grad_out, const Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_backward_reduce_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +Tensor & batch_norm_backward_elemt_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, const Tensor & sum_dy, const Tensor & sum_dy_xmu, const Tensor & count, Tensor & out); // {"schema": "aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_update_stats_out(const Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? 
running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _nnpack_spatial_convolution_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ones_out(IntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ones_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _euclidean_dist_out(const Tensor & x1, const Tensor & x2, Tensor & out); // {"schema": "aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _cdist_forward_out(const Tensor & x1, const Tensor & x2, double p, c10::optional compute_mode, Tensor & out); // {"schema": "aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _cdist_backward_out(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist, Tensor & out); // {"schema": "aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pdist_forward_out(const Tensor & self, double p, Tensor & out); // {"schema": "aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pdist_backward_out(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist, Tensor & out); // {"schema": "aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & pixel_shuffle_out(const Tensor & self, int64_t upscale_factor, Tensor & out); // {"schema": "aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & pixel_unshuffle_out(const Tensor & self, int64_t downscale_factor, Tensor & out); // {"schema": "aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & channel_shuffle_out(const Tensor & self, c10::SymInt groups, Tensor & out); // {"schema": "aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pin_memory_out(const Tensor & self, c10::optional device, Tensor & out); // {"schema": "aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scalar_tensor_out(const Scalar & s, Tensor & out); // {"schema": "aten::scalar_tensor.out(Scalar s, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(c10::SymIntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, Tensor & out); // {"schema": "aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_like_out(const Tensor & self, c10::SymInt high, c10::optional memory_format, Tensor & out); // {"schema": "aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_like_out(const Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional memory_format, Tensor & out); // {"schema": "aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_out(c10::SymIntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_out(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, Tensor & out); // {"schema": "aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & repeat_out(const Tensor & self, c10::SymIntArrayRef repeats, Tensor & out); // {"schema": "aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & repeat_interleave_out(const Tensor & repeats, c10::optional output_size, Tensor & out); // {"schema": "aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mkldnn_reshape_out(const Tensor & self, IntArrayRef shape, Tensor & out); // {"schema": "aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & relu_out(const Tensor & self, Tensor & out); // {"schema": "aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & select_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, Tensor & out); // {"schema": "aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & celu_out(const Tensor & self, const Scalar & alpha, Tensor & out); // {"schema": "aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_scatter_out(const Tensor & self, const Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & select_scatter_out(const Tensor & self, const Tensor & src, int64_t dim, c10::SymInt index, Tensor & out); // {"schema": "aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_scatter_out(const Tensor & self, const Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & as_strided_scatter_out(const Tensor & self, const Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, Tensor & out); // {"schema": "aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void unsafe_split_out(const Tensor & self, c10::SymInt split_size, int64_t dim, TensorList out); // {"schema": "aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void unsafe_split_with_sizes_out(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, TensorList out); // {"schema": "aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & sum_out(const Tensor & self, c10::optional dtype, Tensor & out); // {"schema": "aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple std_mean_out(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & prod_out(const Tensor & self, c10::optional dtype, Tensor & out); // {"schema": "aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mkldnn_transpose_out(const Tensor & self, int64_t dim0, int64_t dim1, Tensor & out); // {"schema": "aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & flip_out(const Tensor & self, IntArrayRef dims, Tensor & out); // {"schema": "aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & roll_out(const Tensor & self, c10::SymIntArrayRef shifts, IntArrayRef dims, Tensor & out); // {"schema": "aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rot90_out(const Tensor & self, int64_t k, IntArrayRef dims, Tensor & out); // {"schema": "aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _transform_bias_rescale_qkv_out(const Tensor & qkv, const Tensor & qkv_bias, int64_t num_heads, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_from_mask_out(const Tensor & t, const Tensor & mask, bool mask_check, Tensor & out); // {"schema": "aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_from_padded_out(const Tensor & padded, const Tensor & cpu_nested_shape_example, bool fuse_transform_0213, Tensor & out); // {"schema": "aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_size_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_strides_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_storage_offsets_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_from_padded_and_nested_example_out(const Tensor & padded, const Tensor & nt_example, Tensor & out); // {"schema": "aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_view_from_buffer_copy_out(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, const Tensor & offsets, Tensor & out); // {"schema": "aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_view_from_jagged_copy_out(const Tensor & self, const Tensor & offsets, const Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx, Tensor & out); // {"schema": "aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_get_values_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _trilinear_out(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim, Tensor & out); // {"schema": "aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _unique_out(const Tensor & self, bool sorted, bool return_inverse, Tensor & out0, Tensor & out1); // {"schema": "aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple unique_dim_out(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple unique_consecutive_out(const Tensor & self, bool return_inverse, bool return_counts, c10::optional dim, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple unique_dim_consecutive_out(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple _unique2_out(const Tensor & self, bool sorted, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _unsafe_view_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple var_mean_out(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _weight_norm_interface_out(const Tensor & v, const Tensor & g, int64_t dim, Tensor & out0, Tensor & out1); // {"schema": "aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _weight_norm_interface_backward_out(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim, Tensor & out0, Tensor & out1); // {"schema": "aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & zeros_out(IntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _efficientzerotensor_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & zeros_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _standard_gamma_grad_out(const Tensor & self, const Tensor & output, Tensor & out); // {"schema": "aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _standard_gamma_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _dirichlet_grad_out(const Tensor & x, const Tensor & alpha, const Tensor & total, Tensor & out); // {"schema": "aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sample_dirichlet_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & poisson_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & binomial_out(const Tensor & count, const Tensor & prob, c10::optional generator, Tensor & out); // {"schema": "aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & native_norm_out(const Tensor & self, const Scalar & p, Tensor & out); // {"schema": "aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & native_norm_out(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_sum_out(const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_sum_backward_out(const Tensor & grad, const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_csr_sum_out(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_csr_prod_out(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self, Tensor & out); // {"schema": "aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_log_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_log_softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self, Tensor & out); // {"schema": "aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _spdiags_out(const Tensor & diagonals, const Tensor & offsets, IntArrayRef shape, c10::optional layout, Tensor & out); // {"schema": "aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const Scalar & p, Tensor & out); // {"schema": "aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clone_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & resize_as_out(const Tensor & self, const Tensor & the_template, c10::optional memory_format, const Tensor & out); // {"schema": "aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resize_as(const Tensor & self, const Tensor & the_template, c10::optional memory_format); // {"schema": "aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +const Tensor & resize_as_sparse_out(const Tensor & self, const Tensor & the_template, const Tensor & out); // {"schema": "aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resize_as_sparse(const Tensor & self, const Tensor & the_template); // {"schema": "aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & zero_out(const Tensor & self, Tensor & out); // {"schema": "aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor zero(const Tensor & self); // {"schema": "aten::zero(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sub_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rsub_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rsub_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sparse_coo_tensor_out(IntArrayRef size, Tensor & out); // {"schema": "aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_coo_tensor_with_dims_out(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, Tensor & out); // {"schema": "aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const Tensor & indices, const Tensor & values, c10::optional is_coalesced, Tensor & out); // {"schema": "aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & sparse_resize_out(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const Tensor & out); // {"schema": "aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor sparse_resize(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor", "dispatch": "True", "default": "True"} +const Tensor & sparse_resize_and_clear_out(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const Tensor & out); // {"schema": "aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor sparse_resize_and_clear(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sparse_mask_out(const Tensor & self, const Tensor & mask, Tensor & out); // {"schema": "aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_mask_projection_out(const Tensor & self, const Tensor & mask, bool accumulate_matches, Tensor & out); // {"schema": "aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_dense_out(const Tensor & self, c10::optional dtype, c10::optional masked_grad, Tensor & out); // {"schema": "aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _coalesce_out(const Tensor & self, Tensor & out); // {"schema": "aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _coalesced_out(const Tensor & self, bool coalesced, Tensor & out); // {"schema": "aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _coalesced(const Tensor & self, bool coalesced); // {"schema": "aten::_coalesced(Tensor self, bool coalesced) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copy_sparse_to_sparse_out(const Tensor & self, const Tensor & src, bool non_blocking, Tensor & out); // {"schema": "aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor copy_sparse_to_sparse(const Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_out(const Tensor & self, int64_t sparse_dim, Tensor & out); // {"schema": "aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_out(const Tensor & self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_csr_out(const Tensor & self, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_csc_out(const Tensor & self, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_bsr_out(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_bsc_out(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & to_mkldnn_out(const Tensor & self, c10::optional dtype, Tensor & out); // {"schema": "aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_reorder_conv2d_weight_out(const Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, OptionalSymIntArrayRef input_size, Tensor & out); // {"schema": "aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_reorder_conv3d_weight_out(const Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantize_per_tensor_dynamic_out(const Tensor & self, ScalarType dtype, bool reduce_range, Tensor & out); // {"schema": "aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantize_per_tensor_out(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantize_per_tensor_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void quantize_per_tensor_out(TensorList tensors, const Tensor & scales, const Tensor & zero_points, ScalarType dtype, TensorList out); // {"schema": "aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & quantize_per_channel_out(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & dequantize_out(const Tensor & self, Tensor & out); // {"schema": "aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void dequantize_out(TensorList tensors, TensorList out); // {"schema": "aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & q_per_channel_scales_out(const Tensor & self, Tensor & out); // {"schema": "aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & q_per_channel_zero_points_out(const Tensor & self, Tensor & out); // {"schema": "aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & int_repr_out(const Tensor & self, Tensor & out); // {"schema": "aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _make_per_tensor_quantized_tensor_out(const Tensor & self, double scale, int64_t zero_point, Tensor & out); // {"schema": "aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _make_per_channel_quantized_tensor_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, Tensor & out); // {"schema": "aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple fake_quantize_per_tensor_affine_cachemask_out(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, const Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _fake_quantize_learnable_per_tensor_affine_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, Tensor & out); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple fake_quantize_per_channel_affine_cachemask_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _fake_quantize_learnable_per_channel_affine_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, Tensor & out); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _fused_moving_avg_obs_fq_helper_out(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, Tensor & out0, Tensor & out1); // {"schema": "aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))", "dispatch": "True", "default": "True"} +::std::tuple _fused_moving_avg_obs_fq_helper_functional(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, const Tensor & running_min, const Tensor & running_max, const Tensor & scale, const Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)", "dispatch": "True", "default": "True"} +Tensor & _to_copy_out(const Tensor & self, bool non_blocking, c10::optional memory_format, Tensor & out); // {"schema": "aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _lstm_mps_out(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4, Tensor & out5); // {"schema": "aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) 
out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!))", "dispatch": "True", "default": "True"} +void lstm_mps_backward_out(const c10::optional & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & z_state, const Tensor & cell_state_fwd, const Tensor & input, const Tensor & layersOutputs, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, Tensor & out0, TensorList out1, TensorList out2); // {"schema": "aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()", "dispatch": "True", "default": "True"} +::std::tuple _thnn_fused_lstm_cell_out(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const c10::optional & input_bias, const c10::optional & hidden_bias, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple _thnn_fused_lstm_cell_backward_impl_out(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple _thnn_fused_gru_cell_out(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias, Tensor & out0, Tensor & out1); // {"schema": "aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _thnn_fused_gru_cell_backward_out(const Tensor & grad_hy, const Tensor & workspace, bool has_bias, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"} +::std::tuple _pack_padded_sequence_out(const Tensor & input, const Tensor & lengths, bool batch_first, Tensor & out0, Tensor & out1); // {"schema": "aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & set_out(const Tensor & self, Storage source, Tensor & out); // {"schema": "aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor set(const Tensor & self, Storage source); // {"schema": "aten::set.source_Storage(Tensor self, Storage source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & set_out(const Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor set(const Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & set_out(const Tensor & self, const Tensor & source, Tensor & out); // {"schema": "aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor set(const Tensor & self, const Tensor & source); // {"schema": "aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & set_out(const Tensor & self, Tensor & out); // {"schema": "aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor set(const Tensor & self); // {"schema": "aten::set(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lift_out(const Tensor & self, Tensor & out); // {"schema": "aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lift_fresh_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & masked_fill_out(const Tensor & self, const Tensor & mask, const Scalar & value, Tensor & out); // {"schema": "aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & masked_fill_out(const Tensor & self, const Tensor & mask, const Tensor & value, Tensor & out); // {"schema": "aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & masked_scatter_out(const Tensor & self, const Tensor & mask, const Tensor & source, Tensor & out); // {"schema": "aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _masked_softmax_out(const Tensor & self, const Tensor & mask, c10::optional dim, c10::optional mask_type, Tensor & out); // {"schema": "aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _masked_softmax_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & mask, c10::optional dim, Tensor & out); // {"schema": "aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & put_out(const Tensor & self, const Tensor & index, const Tensor & source, bool accumulate, Tensor & out); // {"schema": "aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & index_fill_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, Tensor & out); // {"schema": "aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & index_fill_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value, Tensor & out); // {"schema": "aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_or_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_xor_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & __lshift___out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & __lshift___out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & __rshift___out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & __rshift___out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & random_out(const Tensor & self, int64_t from, c10::optional to, c10::optional generator, Tensor & out); // {"schema": "aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor random(const Tensor & self, int64_t from, c10::optional to, c10::optional generator); // {"schema": "aten::random.from(Tensor self, int from, int? to, *, Generator? 
generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & random_out(const Tensor & self, int64_t to, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor random(const Tensor & self, int64_t to, c10::optional<Generator> generator); // {"schema": "aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & random_out(const Tensor & self, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor random(const Tensor & self, c10::optional<Generator> generator); // {"schema": "aten::random(Tensor self, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & uniform_out(const Tensor & self, double from, double to, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor uniform(const Tensor & self, double from, double to, c10::optional<Generator> generator); // {"schema": "aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & cauchy_out(const Tensor & self, double median, double sigma, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor cauchy(const Tensor & self, double median, double sigma, c10::optional<Generator> generator); // {"schema": "aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & log_normal_out(const Tensor & self, double mean, double std, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor log_normal(const Tensor & self, double mean, double std, c10::optional<Generator> generator); // {"schema": "aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & exponential_out(const Tensor & self, double lambd, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor exponential(const Tensor & self, double lambd, c10::optional<Generator> generator); // {"schema": "aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & geometric_out(const Tensor & self, double p, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor geometric(const Tensor & self, double p, c10::optional<Generator> generator); // {"schema": "aten::geometric(Tensor self, float p, *, Generator?
generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tril_indices_out(int64_t row, int64_t col, int64_t offset, Tensor & out); // {"schema": "aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & triu_indices_out(int64_t row, int64_t col, int64_t offset, Tensor & out); // {"schema": "aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & trace_out(const Tensor & self, Tensor & out); // {"schema": "aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _cholesky_solve_helper_out(const Tensor & self, const Tensor & A, bool upper, Tensor & out); // {"schema": "aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & dist_out(const Tensor & self, const Tensor & other, const Scalar & p, Tensor & out); // {"schema": "aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void _histogramdd_bin_edges_out(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density, TensorList out); // {"schema": "aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & _histogramdd_from_bin_cts_out(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density, Tensor & out); // {"schema": "aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _histogramdd_from_bin_tensors_out(const Tensor & self, TensorList bins, const c10::optional & weight, bool density, Tensor & out); // {"schema": "aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & remainder_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & argsort_out(const Tensor & self, bool stable, int64_t dim, bool descending, Tensor & out); // {"schema": "aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & unfold_backward_out(const Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, Tensor & out); // {"schema": "aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & normal_out(const Tensor & self, double mean, double std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+void _amp_foreach_non_finite_check_and_unscale_out(TensorList self, Tensor & found_inf, const Tensor & inv_scale, TensorList out); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+::std::tuple<::std::vector<Tensor>,Tensor> _amp_foreach_non_finite_check_and_unscale(TensorList self, const Tensor & found_inf, const Tensor & inv_scale); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)", "dispatch": "True", "default": "True"}
+Tensor & _amp_update_scale_out(const Tensor & self, Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, Tensor & out); // {"schema": "aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor,Tensor> _amp_update_scale(const Tensor & self, const Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); // {"schema": "aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)", "dispatch": "True", "default": "True"}
+void _foreach_add_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_add_out(TensorList self, TensorList other, const Scalar & alpha, TensorList out); // {"schema": "aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_add_out(TensorList self, ArrayRef<Scalar> scalars, TensorList out); // {"schema": "aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_add_out(TensorList self, const Tensor & other, const Scalar & alpha, TensorList out); // {"schema": "aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sub_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sub_out(TensorList self, TensorList other, const Scalar & alpha, TensorList out); // {"schema": "aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sub_out(TensorList self, ArrayRef<Scalar> scalars, TensorList out); // {"schema": "aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_mul_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_mul_out(TensorList self,
TensorList other, TensorList out); // {"schema": "aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_mul_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_mul_out(TensorList self, const Tensor & other, TensorList out); // {"schema": "aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_div_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_div_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_div_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_div_out(TensorList self, const Tensor & other, TensorList out); // {"schema": "aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_max_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_max_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_max_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_min_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_min_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_min_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_maximum_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_maximum_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_maximum_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) 
-> ()", "dispatch": "True", "default": "True"} +void _foreach_minimum_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_minimum_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_minimum_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcdiv_out(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value, TensorList out); // {"schema": "aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcdiv_out(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcdiv_out(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars, TensorList out); // {"schema": "aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcmul_out(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value, TensorList out); // {"schema": "aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcmul_out(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcmul_out(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars, TensorList out); // {"schema": "aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_abs_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_acos_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_asin_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_atan_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_ceil_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_cos_out(TensorList self, TensorList out); // {"schema": 
"aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_cosh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_erf_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_erfc_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_exp_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_expm1_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_floor_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_frac_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_lerp_out(TensorList self, TensorList tensors1, TensorList weights, TensorList out); // {"schema": "aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_lerp_out(TensorList self, TensorList tensors1, const Scalar & weight, TensorList out); // {"schema": "aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_lgamma_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_log_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_log10_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_log1p_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_log2_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_neg_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_norm_out(TensorList self, const Scalar & ord, TensorList out); // {"schema": "aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_pow_out(TensorList self, TensorList exponent, TensorList out); // {"schema": "aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_pow_out(TensorList self, const Scalar & exponent, TensorList out); // {"schema": 
"aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_pow_out(TensorList self, ArrayRef exponent, TensorList out); // {"schema": "aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_reciprocal_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_round_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sigmoid_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sign_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sin_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sinh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sqrt_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_tan_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_tanh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_trunc_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_zero_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::vector _foreach_zero(TensorList self); // {"schema": "aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out", "dispatch": "True", "default": "True"} +void _foreach_copy_out(TensorList self, TensorList src, bool non_blocking, TensorList out); // {"schema": "aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::vector _foreach_copy(TensorList self, TensorList src, bool non_blocking); // {"schema": "aten::_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out", "dispatch": "True", "default": "True"} +Tensor & bucketize_out(const Scalar & self, const Tensor & boundaries, bool out_int32, bool right, Tensor & out); // {"schema": "aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_jvp_out(const Tensor & glu, const Tensor & x, const Tensor & dx, int64_t dim, Tensor & out); // {"schema": "aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_backward_jvp_out(const Tensor & grad_x, const Tensor & grad_glu, const Tensor & x, const Tensor & dgrad_glu, const Tensor & dx, int64_t dim, Tensor & out); // {"schema": "aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hardswish_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rrelu_with_noise_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, bool self_is_result, Tensor & out); // {"schema": "aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_adaptive_avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool2d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool3d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _slow_conv2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & conv_depthwise3d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slow_conv_dilated2d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slow_conv_dilated3d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & isinf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & linalg_matrix_exp_out(const Tensor & self, Tensor & out); // {"schema": "aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_optional_intlist_out(const Tensor & values, OptionalIntArrayRef addends, Tensor & out); // {"schema": "aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_optional_filled_intlist_out(const Tensor & values, OptionalIntArrayRef addends, Tensor & out); // {"schema": "aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_optional_floatlist_out(const Tensor & values, c10::optional> addends, Tensor & out); // {"schema": "aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_warn_in_autograd_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_autograd_multiple_dispatch_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_autograd_multiple_dispatch_view_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & segment_reduce_out(const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial, Tensor & out); // {"schema": "aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _segment_reduce_backward_out(const Tensor & grad, const Tensor & output, const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial, Tensor & out); // {"schema": "aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_from_tensor_list_out(TensorList list, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, Tensor & out); // {"schema": "aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _fw_primal_copy_out(const Tensor & self, int64_t level, Tensor & out); // {"schema": "aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _make_dual_copy_out(const Tensor & primal, const Tensor & tangent, int64_t level, Tensor & out); // {"schema": "aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_as_real_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_as_complex_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _conj_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _neg_view_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & as_strided_copy_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, Tensor & out); // {"schema": "aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_broadcast_to_copy_out(const Tensor & self, IntArrayRef size, Tensor & out); // {"schema": "aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_copy_out(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & expand_copy_out(const Tensor & self, c10::SymIntArrayRef size, bool implicit, Tensor & out); // {"schema": "aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & permute_copy_out(const Tensor & self, IntArrayRef dims, Tensor & out); // {"schema": "aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _reshape_alias_copy_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & select_copy_out(const Tensor & self, int64_t dim, c10::SymInt index, Tensor & out); // {"schema": "aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & detach_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_copy_out(const Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_copy_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_copy_out(const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & t_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & transpose_copy_out(const Tensor & self, int64_t dim0, int64_t dim1, Tensor & out); // {"schema": "aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & unsqueeze_copy_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _values_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & values_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & crow_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & col_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::col_indices_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ccol_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & row_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_copy_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_copy_out(const Tensor & self, ScalarType dtype, Tensor & out); // {"schema": "aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & unfold_copy_out(const Tensor & self, int64_t dimension, int64_t size, int64_t step, Tensor & out); // {"schema": "aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & alias_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & to_padded_tensor_out(const Tensor & self, double padding, OptionalSymIntArrayRef output_size, Tensor & out); // {"schema": "aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _transformer_encoder_layer_fwd_out(const Tensor & src, int64_t embed_dim, int64_t num_heads, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const Tensor & norm_weight_1, const Tensor & norm_bias_1, const Tensor & norm_weight_2, const Tensor & norm_bias_2, const Tensor & ffn_weight_1, const Tensor & ffn_bias_1, const Tensor & ffn_weight_2, const Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type, Tensor & out); // {"schema": "aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _native_multi_head_attention_out(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask, bool need_weights, bool average_attn_weights, c10::optional mask_type, Tensor & out0, Tensor & out1); // {"schema": "aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _triton_scaled_dot_attention_out(const Tensor & q, const Tensor & k, const Tensor & v, double dropout_p, Tensor & out); // {"schema": "aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _triton_multi_head_attention_out(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask, Tensor & out); // {"schema": "aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _foobar_out(const Tensor & self, bool arg1, bool arg2, bool arg3, Tensor & out); // {"schema": "aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void _fused_adam_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"} +void _fused_adam_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"} +void _fused_adamw_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"} +void _fused_adamw_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"} +void _fused_sgd_out(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)", "dispatch": "True", "default": "True"} +void _fused_sgd_out(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, const Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, const Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)", "dispatch": "True", "default": "True"} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Scalar.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..e12557428f15674e4382983c07de64c3e43e8af0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Scalar.h @@ -0,0 +1,3 @@ +#pragma once + +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h new file mode 100644 index 0000000000000000000000000000000000000000..fabfa44190c727c9fdf9aa034d042559da1b621d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..dfb84c02bb34cbc9313d5efaac57069de17d77c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h @@ -0,0 +1,400 @@ +#pragma once + +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#endif + +namespace at { +struct TORCH_API SparseTensorImpl : public TensorImpl { + // Stored in COO format, indices + values. + + // INVARIANTS: + // sparse_dim: range [0, len(shape)]; sparse_dim + dense_dim = len(shape) + // dense_dim : range [0, len(shape)]; sparse_dim + dense_dim = len(shape) + // _indices.shape: dimensionality: 2, shape: (sparse_dim, nnz) + // _values.shape: dimensionality: 1 + dense_dim. shape: (nnz, + // shape[sparse_dim:]) + + int64_t sparse_dim_ = 0; // number of sparse dimensions + int64_t dense_dim_ = 0; // number of dense dimensions + + Tensor indices_; // always a LongTensor + Tensor values_; + + // A sparse tensor is 'coalesced' if every index occurs at most once in + // the indices tensor, and the indices are in sorted order. (This means + // that it is very easy to convert a coalesced tensor to CSR format: you + // need only compute CSR format indices.) + // + // Most math operations can only be performed on coalesced sparse tensors, + // because many algorithms proceed by merging two sorted lists (of indices). + bool coalesced_ = false; + + // compute_numel with integer multiplication overflow check, see gh-57542 + void refresh_numel() { + TensorImpl::safe_refresh_numel(); + } + + public: + // Public for now... 
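+  // Editorial note: a small worked example of the COO invariants documented
+  // above; this is an illustrative sketch added for clarity, not part of the
+  // upstream header. For a tensor of shape (4, 4, 2) with sparse_dim = 2,
+  // dense_dim = 1 and nnz = 3, the invariants imply
+  //   indices_.sizes() == {2, 3}   // (sparse_dim, nnz), always a LongTensor
+  //   values_.sizes()  == {3, 2}   // (nnz, shape[sparse_dim:])
+  // which is what the public factory produces, e.g. in the C++ frontend:
+  //   auto indices = torch::tensor({{0, 1, 3}, {2, 0, 1}});            // 2 x 3
+  //   auto values  = torch::rand({3, 2});                              // 3 x 2
+  //   auto sp = torch::sparse_coo_tensor(indices, values, {4, 4, 2});
+  //   sp.sparse_dim();    // 2
+  //   sp.dense_dim();     // 1
+  //   sp._nnz();          // 3
+  //   sp.is_coalesced();  // false until sp = sp.coalesce();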
+ explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta); + + void release_resources() override; + + int64_t nnz() const { + return values_.size(0); + } + + c10::SymInt sym_nnz() const { + return values_.sym_size(0); + } + int64_t sparse_dim() const { + return sparse_dim_; + } + int64_t dense_dim() const { + return dense_dim_; + } + bool coalesced() const { + return coalesced_; + } + Tensor indices() const { + return indices_; + } + Tensor values() const { + return values_; + } + + void set_size(int64_t dim, int64_t new_size) override; + void set_stride(int64_t dim, int64_t new_stride) override; + void set_storage_offset(int64_t storage_offset) override; + +#ifdef DEBUG + bool has_storage() const override; +#endif + + // WARNING: This function does NOT preserve invariants of sparse_dim/dense_dim + // with respect to indices and values + void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "raw_resize_ ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "raw_resize_ called on tensor with symbolic shape") + set_sizes_and_strides(size, std::vector(size.size())); + sparse_dim_ = sparse_dim; + dense_dim_ = dense_dim; + refresh_numel(); + } + + // NOTE: This function preserves invariants of sparse_dim/dense_dim with + // respect to indices and values. + // + // NOTE: This function supports the following cases: + // 1. When we keep the number of dense dimensions unchanged, and NOT shrinking + // the size of any of the dense dimensions. + // 2. When we keep the number of sparse dimensions unchanged, and NOT + // shrinking the size of any of the sparse dimensions. + // 3. When the sparse tensor has zero nnz, in which case we are free to change + // the shapes of both its sparse and dense dimensions. + // + // This function DOESN'T support (and will throw an error) the following + // cases: + // 1. When we attempt to change the number of sparse dimensions on a non-empty + // sparse tensor (such an operation will invalidate the indices stored). + // 2. When we attempt to change the number of dense dimensions on a non-empty + // sparse tensor (such an operation will behave differently from an equivalent + // dense tensor's resize method, and for API consistency we don't support it). + // 3. When we attempt to shrink the size of any of the dense dimensions on a + // non-empty sparse tensor (such an operation will behave differently from an + // equivalent dense tensor's resize method, and for API consistency we don't + // support it). + // 4. When we attempt to shrink the size of any of the sparse dimensions on a + // non-empty sparse tensor (this could make some of the stored indices + // out-of-bound and thus unsafe). + template + void _resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef size) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "resize_ ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "resize_ called on tensor with symbolic shape") + TORCH_CHECK( + sparse_dim + dense_dim == static_cast(size.size()), + "number of dimensions must be sparse_dim (", + sparse_dim, + ") + dense_dim (", + dense_dim, + "), but got ", + size.size()); + if (nnz() > 0) { + auto alt_options_msg = + "You could try the following options:\n\ +1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\ +2. If you need to resize this tensor, you have the following options:\n\ + 1. 
For both sparse and dense dimensions, keep the number of them constant and the size of them non-shrinking, and then try the same call again.\n\ + 2. Or, create a new sparse tensor with the correct indices and values from this sparse tensor."; + + TORCH_CHECK( + sparse_dim == sparse_dim_, + "changing the number of sparse dimensions (from ", + sparse_dim_, + " to ", + sparse_dim, + ") on a non-empty sparse tensor is not supported.\n", + alt_options_msg); + + TORCH_CHECK( + dense_dim == dense_dim_, + "changing the number of dense dimensions (from ", + dense_dim_, + " to ", + dense_dim, + ") on a non-empty sparse tensor is not supported.\n", + alt_options_msg); + + bool shrinking_sparse_dims = false; + bool shrinking_dense_dim = false; + auto sparse_size_original = generic_sizes().slice(0, sparse_dim); + auto sparse_size_new = size.slice(0, sparse_dim); + for (const auto i : c10::irange(sparse_dim)) { + if (sparse_size_new[i] < sparse_size_original[i]) { + shrinking_sparse_dims = true; + break; + } + } + auto dense_size_original = generic_sizes().slice(sparse_dim); + auto dense_size_new = size.slice(sparse_dim); + for (const auto i : c10::irange(dense_dim)) { + if (dense_size_new[i] < dense_size_original[i]) { + shrinking_dense_dim = true; + break; + } + } + + TORCH_CHECK( + !shrinking_sparse_dims, + "shrinking the size of sparse dimensions (from ", + sparse_size_original, + " to ", + sparse_size_new, + ") on a non-empty sparse tensor is not supported.\n", + alt_options_msg); + + TORCH_CHECK( + !shrinking_dense_dim, + "shrinking the size of dense dimensions (from ", + dense_size_original, + " to ", + dense_size_new, + ") on a non-empty sparse tensor is not supported.\n", + alt_options_msg); + } + + auto sizes_and_strides = generic_sizes(); + const bool size_equals_sizes = std::equal( + size.begin(), + size.end(), + sizes_and_strides.begin(), + sizes_and_strides.end()); + if ((!size_equals_sizes) || (sparse_dim != sparse_dim_) || + (dense_dim != dense_dim_)) { + auto nnz = at::symint::sizes(values())[0]; + std::vector values_size = {nnz}; + auto dense_size = size.slice(sparse_dim); + values_size.insert( + values_size.end(), dense_size.begin(), dense_size.end()); + at::symint::resize_(values_, values_size); + at::symint::resize_(indices_, {T(sparse_dim), nnz}); + } + + if (!size_equals_sizes) { + set_sizes_and_strides(size, std::vector(size.size())); + } + sparse_dim_ = sparse_dim; + dense_dim_ = dense_dim; + refresh_numel(); + } + + void resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef size) { + return _resize_(sparse_dim, dense_dim, size); + } + + void resize_( + int64_t sparse_dim, + int64_t dense_dim, + ArrayRef size) { + return _resize_(sparse_dim, dense_dim, size); + } + + // NOTE: this function will resize the sparse tensor and also set `indices` + // and `values` to empty. 
+ void resize_and_clear_( + int64_t sparse_dim, + int64_t dense_dim, + IntArrayRef size) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "resize_and_clear_ ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "resize_and_clear_ called on tensor with symbolic shape") + TORCH_CHECK( + sparse_dim + dense_dim == static_cast(size.size()), + "number of dimensions must be sparse_dim (", + sparse_dim, + ") + dense_dim (", + dense_dim, + "), but got ", + size.size()); + + set_sizes_and_strides(size, std::vector(size.size())); + sparse_dim_ = sparse_dim; + dense_dim_ = dense_dim; + + auto empty_indices = at::empty({sparse_dim, 0}, indices().options()); + std::vector values_size = {0}; + auto dense_size = sizes().slice(sparse_dim); + values_size.insert(values_size.end(), dense_size.begin(), dense_size.end()); + auto empty_values = at::empty(values_size, values().options()); + set_indices_and_values_unsafe(empty_indices, empty_values); + refresh_numel(); + } + + void set_coalesced(bool coalesced) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_coalesced ", + err_msg_tensor_metadata_change_not_allowed); + coalesced_ = coalesced; + } + + // NOTE: this function is only used internally and not exposed to Python + // frontend + void set_nnz_and_narrow(int64_t new_nnz) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_nnz_and_narrow ", + err_msg_tensor_metadata_change_not_allowed); + AT_ASSERT(new_nnz <= nnz()); + indices_ = indices_.narrow(1, 0, new_nnz); + values_ = values_.narrow(0, 0, new_nnz); + if (new_nnz < 2) { + coalesced_ = true; + } + } + + // Takes indices and values and directly puts them into the sparse tensor, no + // copy. NOTE: this function is unsafe because it doesn't check whether any + // indices are out of boundaries of `sizes`, so it should ONLY be used where + // we know that the indices are guaranteed to be within bounds. This used to + // be called THSTensor_(_move) NB: This used to be able to avoid a refcount + // bump, but I was too lazy to make it happen + void set_indices_and_values_unsafe( + const Tensor& indices, + const Tensor& values); + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override { + auto impl = c10::make_intrusive(key_set(), dtype()); + copy_tensor_metadata( + /*src_sparse_impl=*/this, + /*dest_sparse_impl=*/impl.get(), + /*version_counter=*/version_counter, + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + impl->refresh_numel(); + return impl; + } + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override { + auto impl = c10::make_intrusive(key_set(), dtype()); + copy_tensor_metadata( + /*src_sparse_impl=*/this, + /*dest_sparse_impl=*/impl.get(), + /*version_counter=*/std::move(version_counter), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + impl->refresh_numel(); + return impl; + } + + /** + * Shallow-copies data from another TensorImpl into this TensorImpl. 
+ * + * For why this function doesn't check this TensorImpl's + * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ]. + */ + void shallow_copy_from(const c10::intrusive_ptr& impl) override { + AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set())); + auto sparse_impl = static_cast(impl.get()); + copy_tensor_metadata( + /*src_sparse_impl=*/sparse_impl, + /*dest_sparse_impl=*/this, + /*version_counter=*/version_counter(), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); + refresh_numel(); + } + + private: + explicit SparseTensorImpl( + at::DispatchKeySet, + const caffe2::TypeMeta, + at::Tensor indices, + at::Tensor values); + + /** + * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / + * storage_offset) from one TensorImpl to another TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE + * [ TensorImpl Shallow-Copying ]. + */ + static void copy_tensor_metadata( + const SparseTensorImpl* src_sparse_impl, + SparseTensorImpl* dest_sparse_impl, + c10::VariableVersion version_counter, + bool allow_tensor_metadata_change) { + TensorImpl::copy_tensor_metadata( + src_sparse_impl, + dest_sparse_impl, + std::move(version_counter), + allow_tensor_metadata_change); + + // Sparse-specific fields + dest_sparse_impl->sparse_dim_ = src_sparse_impl->sparse_dim(); + dest_sparse_impl->dense_dim_ = src_sparse_impl->dense_dim(); + dest_sparse_impl->indices_ = src_sparse_impl->indices(); + dest_sparse_impl->values_ = src_sparse_impl->values(); + dest_sparse_impl->coalesced_ = src_sparse_impl->coalesced(); + } + + const char* tensorimpl_type_name() const override; +}; + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h new file mode 100644 index 0000000000000000000000000000000000000000..41f14a15ba99c2bb2eb81aeaadbd0b08ac086c4d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h @@ -0,0 +1,144 @@ +#pragma once + +#include +#include + +namespace at { + +// Return if the tensor geometry represented by `sizes` and `strides` is +// contiguous Although we cache is_contiguous in tensor now, this is till useful +// because it allows checking if a particular geometry is contiguous without +// explicitly constructing a tensor, e.g., when you want to choose a kernel +// strategy based on whether a subgeometry is contiguous. 
+TORCH_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);
+
+struct TORCH_API TensorGeometry {
+  TensorGeometry() = default;
+
+  explicit TensorGeometry(c10::SymIntArrayRef sizes)
+      : sizes_(sizes.vec()),
+        strides_(sizes.size()),
+        has_symbolic_sizes_strides_(
+            !c10::asIntArrayRefSlowOpt(sizes).has_value()) {
+    int64_t dim = static_cast<int64_t>(sizes.size());
+    c10::SymInt expected_stride = 1;
+    for (int64_t i = dim - 1; i >= 0; i--) {
+      strides_[i] = expected_stride;
+      expected_stride *= sizes_[i];
+    }
+    numel_ = expected_stride;
+  }
+
+  explicit TensorGeometry(const TensorBase& t)
+      : sizes_(t.sym_sizes().vec()),
+        strides_(t.sym_strides().vec()),
+        storage_offset_(t.sym_storage_offset()),
+        numel_(t.sym_numel()),
+        has_symbolic_sizes_strides_(
+            t.unsafeGetTensorImpl()->has_symbolic_sizes_strides()) {}
+
+  // true if the tensor is contiguous
+  bool is_contiguous() const;
+
+  int64_t dim() const {
+    return static_cast<int64_t>(sizes_.size());
+  }
+
+  int64_t size(int64_t dim) const {
+    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+    dim = c10::maybe_wrap_dim(dim, this->dim());
+    return sizes_.at(static_cast<size_t>(dim)).as_int_unchecked();
+  }
+  c10::IntArrayRef sizes() const {
+    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+    return c10::asIntArrayRefUnchecked(sizes_);
+  }
+  int64_t stride(int64_t dim) const {
+    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+    dim = c10::maybe_wrap_dim(dim, this->dim());
+    return strides_.at(static_cast<size_t>(dim)).as_int_unchecked();
+  }
+  c10::IntArrayRef strides() const {
+    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+    return c10::asIntArrayRefUnchecked(strides_);
+  }
+  int64_t storage_offset() const {
+    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+    return storage_offset_.as_int_unchecked();
+  }
+  int64_t numel() const {
+    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+    return numel_.as_int_unchecked();
+  }
+
+  c10::SymInt sym_size(int64_t dim) const {
+    dim = c10::maybe_wrap_dim(dim, this->dim());
+    return sizes_.at(static_cast<size_t>(dim));
+  }
+  c10::SymIntArrayRef sym_sizes() const {
+    return sizes_;
+  }
+  c10::SymInt sym_stride(int64_t dim) const {
+    dim = c10::maybe_wrap_dim(dim, this->dim());
+    return strides_.at(static_cast<size_t>(dim));
+  }
+  c10::SymIntArrayRef sym_strides() const {
+    return strides_;
+  }
+  c10::SymInt sym_storage_offset() const {
+    return storage_offset_;
+  }
+  c10::SymInt sym_numel() const {
+    return numel_;
+  }
+
+  TensorGeometry transpose(int64_t dim0, int64_t dim1) {
+    TensorGeometry r = *this; // copy
+    TORCH_CHECK(
+        dim0 < dim(),
+        "transpose: dim0=",
+        dim0,
+        " out of range (dim=",
+        dim(),
+        ")")
+    TORCH_CHECK(
+        dim1 < dim(),
+        "transpose: dim1=",
+        dim1,
+        " out of range (dim=",
+        dim(),
+        ")")
+    std::swap(r.sizes_[dim0], r.sizes_[dim1]);
+    std::swap(r.strides_[dim0], r.strides_[dim1]);
+    return r;
+  }
+
+  std::vector<c10::SymInt>& mutable_sizes() {
+    return sizes_;
+  }
+  std::vector<c10::SymInt>& mutable_strides() {
+    return strides_;
+  }
+  c10::SymInt& mutable_storage_offset() {
+    return storage_offset_;
+  }
+  void recompute() {
+    // recalculate numel after a change
+    c10::SymInt numel = 1;
+    for (const auto& i : sizes_) {
+      numel = numel * i;
+    }
+    numel_ = std::move(numel);
+    has_symbolic_sizes_strides_ =
+        !c10::asIntArrayRefSlowOpt(sizes_).has_value();
+  }
+
+ private:
+  std::vector<c10::SymInt> sizes_;
+  std::vector<c10::SymInt> strides_;
+  c10::SymInt storage_offset_;
+  c10::SymInt numel_;
+  bool has_symbolic_sizes_strides_{false};
+};
+
+} // namespace at
diff --git
a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h new file mode 100644 index 0000000000000000000000000000000000000000..b6bb4710900c5570496785c502e492ec76520d93 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h @@ -0,0 +1,735 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#include +#else +#include +#include +#include +#include +#endif + +#include + +#include + +namespace at::indexing { + +constexpr int64_t INDEX_MIN = c10::SymInt::min_representable_int(); +constexpr int64_t INDEX_MAX = -(INDEX_MIN + 1); + +enum class TensorIndexType { None, Ellipsis, SymInt, Boolean, Slice, Tensor }; + +constexpr c10::nullopt_t None = c10::nullopt; + +struct TORCH_API EllipsisIndexType final { + EllipsisIndexType() = default; +}; +TORCH_API extern const EllipsisIndexType Ellipsis; + +struct TORCH_API Slice final { + public: + Slice( + c10::optional start_index = c10::nullopt, + c10::optional stop_index = c10::nullopt, + c10::optional step_index = c10::nullopt) { + if (!step_index.has_value()) { + step_ = c10::SymInt(1); + } else { + step_ = std::move(step_index).value(); + } + + TORCH_CHECK_VALUE(step_ != 0, "slice step cannot be zero"); + + if (!start_index.has_value()) { + start_ = c10::SymInt(step_ < 0 ? INDEX_MAX : 0); + } else { + start_ = std::move(start_index).value(); + } + + if (!stop_index.has_value()) { + stop_ = c10::SymInt(step_ < 0 ? INDEX_MIN : INDEX_MAX); + } else { + stop_ = std::move(stop_index).value(); + } + } + + inline c10::SymInt start() const { + return start_; + } + + inline c10::SymInt stop() const { + return stop_; + } + + inline c10::SymInt step() const { + return step_; + } + + private: + c10::SymInt start_; + c10::SymInt stop_; + c10::SymInt step_; +}; + +TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice); + +// `at::indexing::TensorIndex` is used for converting C++ tensor indices such as +// `{None, "...", Ellipsis, 0, true, Slice(1, None, 2), torch::tensor({1, 2})}` +// into its equivalent `std::vector`, so that further tensor +// indexing operations can be performed using the supplied indices. +// +// There is one-to-one correspondence between Python and C++ tensor index types: +// Python | C++ +// ----------------------------------------------------- +// `None` | `at::indexing::None` +// `Ellipsis` | `at::indexing::Ellipsis` +// `...` | `"..."` +// `123` | `123` +// `True` / `False` | `true` / `false` +// `:` | `Slice()` / `Slice(None, None)` +// `::` | `Slice()` / `Slice(None, None, None)` +// `1:` | `Slice(1, None)` +// `1::` | `Slice(1, None, None)` +// `:3` | `Slice(None, 3)` +// `:3:` | `Slice(None, 3, None)` +// `::2` | `Slice(None, None, 2)` +// `1:3` | `Slice(1, 3)` +// `1::2` | `Slice(1, None, 2)` +// `:3:2` | `Slice(None, 3, 2)` +// `1:3:2` | `Slice(1, 3, 2)` +// `torch.tensor([1, 2])`) | `torch::tensor({1, 2})` +struct TORCH_API TensorIndex final { + // Case 1: `at::indexing::None` + TensorIndex(c10::nullopt_t) : type_(TensorIndexType::None) {} + + // Case 2: "..." 
/ `at::indexing::Ellipsis` + TensorIndex(at::indexing::EllipsisIndexType) + : type_(TensorIndexType::Ellipsis) {} + TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) { + TORCH_CHECK_VALUE( + strcmp(str, "...") == 0, + "Expected \"...\" to represent an ellipsis index, but got \"", + str, + "\""); + } + + // Case 3: (Sym) Integer value + TensorIndex(SymInt integer) + : integer_(std::move(integer)), type_(TensorIndexType::SymInt) {} + TensorIndex(int64_t integer) : TensorIndex(SymInt(integer)) {} + TensorIndex(int integer) : TensorIndex(SymInt(integer)) {} + + // Case 4: Boolean value + template >> + TensorIndex(T boolean) : boolean_(boolean), type_(TensorIndexType::Boolean) {} + + // Case 5: Slice represented in `at::indexing::Slice` form + TensorIndex(Slice slice) + : slice_(std::move(slice)), type_(TensorIndexType::Slice) {} + + // Case 6: Tensor value + TensorIndex(Tensor tensor) + : tensor_(std::move(tensor)), type_(TensorIndexType::Tensor) {} + + inline bool is_none() const { + return type_ == TensorIndexType::None; + } + + inline bool is_ellipsis() const { + return type_ == TensorIndexType::Ellipsis; + } + + inline bool is_integer() const { + return type_ == TensorIndexType::SymInt; + } + + inline SymInt integer() const { + return integer_; + } + + inline bool is_boolean() const { + return type_ == TensorIndexType::Boolean; + } + + inline bool boolean() const { + return boolean_; + } + + inline bool is_slice() const { + return type_ == TensorIndexType::Slice; + } + + inline const Slice& slice() const { + return slice_; + } + + inline bool is_tensor() const { + return type_ == TensorIndexType::Tensor; + } + + inline const Tensor& tensor() const { + return tensor_; + } + + private: + SymInt integer_ = 0; + bool boolean_ = false; + Slice slice_; + Tensor tensor_; + TensorIndexType type_; +}; + +TORCH_API std::ostream& operator<<( + std::ostream& stream, + const TensorIndex& tensor_index); +TORCH_API std::ostream& operator<<( + std::ostream& stream, + const std::vector& tensor_indices); + +namespace impl { +static inline Tensor applySlice( + const Tensor& self, + int64_t dim, + c10::SymInt start, + c10::SymInt stop, + c10::SymInt step, + bool disable_slice_optimization, + const at::Device& self_device, + const c10::optional& self_sizes) { + // TODO: implement negative step + TORCH_CHECK_VALUE(step > 0, "step must be greater than zero"); + + // See NOTE [nested tensor size for indexing] + if (self_sizes.has_value()) { + // Skip this optimization if we are tracing, as the trace may be polymorphic + // over the shape of the `self` tensor, and we still want to record + // the slice. + SymInt length = (self_device == at::kCPU || self_device == at::kCUDA) + ? (*self_sizes)[dim] + : self.sym_size(dim); + if (!disable_slice_optimization && + TORCH_GUARD_SIZE_OBLIVIOUS(start.sym_eq(0)) && length == stop && + step == 1) { + return self; + } + } + return self.slice_symint( + dim, std::move(start), std::move(stop), std::move(step)); +} + +static inline Tensor applySelect( + const Tensor& self, + int64_t dim, + SymInt index, + int64_t real_dim, + const at::Device& /*self_device*/, + const c10::optional& self_sizes) { + // See NOTE [nested tensor size for indexing] + if (self_sizes.has_value()) { + auto maybe_index = index.maybe_as_int(); + if (maybe_index.has_value()) { + TORCH_CHECK_INDEX( + !(maybe_index.value() == 0 && dim == 0 && self_sizes->empty()), + "invalid index of a 0-dim tensor. 
", + "Use `tensor.item()` in Python or `tensor.item()` in C++ to convert a 0-dim tensor to a number"); + } + + auto size = (*self_sizes)[dim]; + // Note: `size >= -index` is not equivalent to `size > -1 - index` if index + // is INT64_MIN For std::numeric_limits::min() result of unary + // minus is undefined by the standard but in practice is equal to self. On + // the other hand, indexing wraping is valid for all negative int64_t + // values, as x[INT64_MIN] is the same as x[INT64_MAX] + TORCH_CHECK_INDEX( + size > -1 - index && size > index, + "index ", + index, + " is out of bounds for dimension ", + real_dim, + " with size ", + size); + } + + // if the index is negative, do not normalize it because that would fix the + // index on the current tensor size in the tracer. aten::select also works on + // negative indices + return self.select_symint(dim, std::move(index)); +} + +static inline Tensor boolToIndexingTensorCPUOrCUDA( + const Tensor& self, + bool value) { + // booleans add a dimension of size 1. true indexes this dimension as if 0:, + // false as empty. + if (value) { + return at::empty({1}, self.options().dtype(kLong)).fill_(0.); + } else { + return at::empty({0}, self.options().dtype(kLong)); + } +} + +static inline Tensor boolToIndexingTensorNonNativeDeviceType( + const Tensor& self, + bool value) { + // booleans add a dimension of size 1. true indexes this dimension as if 0:, + // false as empty. + if (value) { + return at::zeros({1}, self.options().dtype(kLong)); + } else { + return at::empty({0}, self.options().dtype(kLong)); + } +} + +static inline Tensor boolToIndexingTensor( + const Tensor& self, + bool value, + const at::Device& self_device) { + if (self_device == at::kCPU || self_device == at::kCUDA) { + return boolToIndexingTensorCPUOrCUDA(self, value); + } else { + return boolToIndexingTensorNonNativeDeviceType(self, value); + } +} + +static inline Tensor scalarToTensorNonNativeDeviceType( + const Scalar& v, + const TensorOptions& options) { + return at::scalar_tensor(v, options); +} + +static inline void recordTensorIndex( + const Tensor& tensor, + std::vector& outIndices, + int64_t* dim_ptr) { + // TODO: check scalarType + outIndices.resize(*dim_ptr + 1); + outIndices[*dim_ptr] = tensor; + (*dim_ptr)++; +}; + +static inline c10::List> typeConvertIndices( + const Tensor& /*self*/, + std::vector&& indices) { + c10::List> converted_inds; + converted_inds.reserve(indices.size()); + for (auto&& i : std::move(indices)) { + converted_inds.push_back(std::move(i)); + } + return converted_inds; +} + +// NOTE: Why do we mirror instead of replace the `count_specified_dimensions` +// function in torch/csrc/autograd/python_variable_indexing.cpp? It's because +// `count_specified_dimensions` is on the hot path of Python tensor multi-dim +// indexing (i.e. it's called by `applySlicing` which is called by +// `THPVariable_getitem` / `THPVariable_setitem` when handling indexing of more +// than one dimension). If we were to merge the Python/C++ +// `count_specified_dimensions` function, on the Python side we would have to +// construct a `std::vector` container to be consumed by the C++ +// `count_specified_dimensions` function, which adds 100s of nanoseconds +// overhead and is undesirable. 
+static inline int64_t count_specified_dimensions( + const ArrayRef& indices) { + // Count the number of indexed dimensions (everything but ellipsis and None) + int64_t count = 0; + for (auto& obj : indices) { + if (obj.is_tensor()) { + auto& tensor = obj.tensor(); + if (tensor.scalar_type() == kByte || tensor.scalar_type() == kBool) { + count += tensor.dim(); + } else { + count++; + } + } else if (!obj.is_none() && !obj.is_ellipsis() && !obj.is_boolean()) { + count++; + } + } + return count; +} +} // namespace impl + +// NOTE: Many functions below are only for consumption from Python indexing +// implementation, they include: +// +// - `Tensor scalarToTensor(...)` +// - `IntArrayRef slicePrefix1sSize(...)` +// - `void copy_to(...)` +// - `Tensor handleDimInMultiDimIndexing(...)` +// - `Tensor dispatch_index(...)` +// - `Tensor dispatch_index_put_(...)` +// - `Tensor get_item(...)` +// - `void set_item(...)` +// +// The rest of the functions are in `at::indexing::impl` namespace, signifying +// that they shouldn't be used from Python indexing implementation. +static inline Tensor scalarToTensor( + const Scalar& v, + const TensorOptions& options, + const at::Device& self_device) { + if (self_device == at::kCPU && !v.isSymbolic()) { + return at::detail::scalar_tensor_static( + v, options.dtype_opt()->toScalarType(), self_device); + } else { + return impl::scalarToTensorNonNativeDeviceType(v, options); + } +} + +// To match numpy semantics: +// As a special case for backwards compatibility, +// strip away unit dimensions from the left of 'src' +static inline SymIntArrayRef slicePrefix1sSize(const SymIntArrayRef& sizes) { + size_t first_non1_src = sizes.size(); + for (const auto i : c10::irange(sizes.size())) { + // Unbacked SymInt has different behavior, but this is sound because + // failing to slice will only ever cause an error, not divergent + // behavior + if (!sizes[i].has_hint() || sizes[i] != 1) { + first_non1_src = i; + break; + } + } + + return sizes.slice(first_non1_src); +} + +static inline void copy_to(const Tensor& dst, const Tensor& src) { + if (dst.sym_sizes().equals(src.sym_sizes())) { + // A shortcut to avoid generating hard-coded constant sizes during tracing. + // This is not a perfect solution: when src & dst have different shapes, + // constants will still appear. Users can workaround that case by + // dst[index..] = src.reshape(..) 
+ dst.copy_(src); + return; + } else if (src.dim() == 0 && src.device().type() == at::kCPU) { + dst.fill_(src); + return; + } + auto src_view = src.view_symint(slicePrefix1sSize(src.sym_sizes())); + c10::MaybeOwned b_src = expand_inplace(dst, src_view, "setitem"); + dst.copy_(*b_src); +} + +// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor +// indexing functions from Python ] +static inline Tensor handleDimInMultiDimIndexing( + const Tensor& prev_dim_result, + const Tensor& original_tensor, + const TensorIndex& index, + int64_t* dim_ptr, + int64_t* specified_dims_ptr, + int64_t real_dim, + std::vector& outIndices, + bool disable_slice_optimization, + const at::Device& original_tensor_device, + const c10::optional& prev_dim_result_sizes) { + if (index.is_integer()) { + return impl::applySelect( + prev_dim_result, + *dim_ptr, + index.integer(), + real_dim, + original_tensor_device, + prev_dim_result_sizes); + } else if (index.is_slice()) { + Tensor result = impl::applySlice( + prev_dim_result, + *dim_ptr, + index.slice().start(), + index.slice().stop(), + index.slice().step(), + /*disable_slice_optimization=*/disable_slice_optimization, + original_tensor_device, + prev_dim_result_sizes); + (*dim_ptr)++; + return result; + } else if (index.is_ellipsis()) { + (*dim_ptr) += original_tensor.dim() - (*specified_dims_ptr); + return prev_dim_result; + } else if (index.is_none()) { + Tensor result = prev_dim_result.unsqueeze(*dim_ptr); + (*dim_ptr)++; + return result; + } else if (index.is_boolean()) { + Tensor result = prev_dim_result.unsqueeze(*dim_ptr); + impl::recordTensorIndex( + impl::boolToIndexingTensor( + result, index.boolean(), original_tensor_device), + outIndices, + dim_ptr); + return result; + } else if (index.is_tensor()) { + Tensor result = prev_dim_result; + const Tensor& tensor = index.tensor(); + auto scalar_type = tensor.scalar_type(); + if (tensor.dim() == 0 && + at::isIntegralType(scalar_type, /*includeBool=*/true)) { + if (scalar_type != at::kByte && scalar_type != at::kBool) { + result = impl::applySelect( + result, + *dim_ptr, + tensor.item(), + real_dim, + original_tensor_device, + prev_dim_result_sizes); + } else { + result = result.unsqueeze(*dim_ptr); + if (scalar_type == at::kBool) { + impl::recordTensorIndex( + impl::boolToIndexingTensor( + result, tensor.item() != 0, original_tensor_device), + outIndices, + dim_ptr); + } else { + impl::recordTensorIndex( + impl::boolToIndexingTensor( + result, tensor.item() != 0, original_tensor_device), + outIndices, + dim_ptr); + } + } + } else { + impl::recordTensorIndex(tensor, outIndices, dim_ptr); + } + return result; + } else { + TORCH_INTERNAL_ASSERT(false, "Invalid TensorIndex type"); + } +} + +namespace impl { +// This mirrors `applySlicing` in +// torch/csrc/autograd/python_variable_indexing.cpp +static inline Tensor applySlicing( + const Tensor& self, + const ArrayRef& indices, + std::vector& outIndices, + bool disable_slice_optimization, + const at::Device& self_device, + const c10::optional& self_sizes) { + int64_t dim = 0; + int64_t specified_dims = impl::count_specified_dimensions(indices); + + // See NOTE [nested tensor size for indexing] + if (self_sizes.has_value()) { + TORCH_CHECK_INDEX( + specified_dims <= (int64_t)self_sizes->size(), + "too many indices for tensor of dimension ", + (int)self_sizes->size()); + } + + Tensor result = self; + for (const auto i : c10::irange(indices.size())) { + auto& obj = indices[i]; + // See NOTE [nested tensor size for indexing] + c10::optional 
result_sizes = result.is_nested() + ? c10::optional(c10::nullopt) + : c10::optional(result.sym_sizes()); + result = handleDimInMultiDimIndexing( + /*prev_dim_result=*/result, + /*original_tensor=*/self, + /*index=*/obj, + /*dim_ptr=*/&dim, + /*specified_dims_ptr=*/&specified_dims, + /*real_dim=*/static_cast(i), + /*outIndices=*/outIndices, + /*disable_slice_optimization=*/disable_slice_optimization, + /*original_tensor_device=*/self_device, + /*prev_dim_result_sizes=*/result_sizes); + } + return result; +} +} // namespace impl + +static inline Tensor dispatch_index( + const Tensor& self, + std::vector&& indices) { + return self.index(impl::typeConvertIndices(self, std::move(indices))); +} + +static inline Tensor dispatch_index_put_( + Tensor& self, + std::vector&& indices, + const Tensor& value) { + return self.index_put_( + impl::typeConvertIndices(self, std::move(indices)), value); +} + +// NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing +// functions from Python ] +// +// Question: When should we set `disable_slice_optimization` to `true` when +// calling C++ tensor indexing functions from Python indexing code? +// +// Answer: What "slice optimization" means: when we have a slicing expression +// like `x[0:5, 0]`, where the sliced tensor was of size 5 in dimension 0, we +// would skip dispatching the actual slice call as an optimization. However, +// here are the cases where we DON'T want this optimization: +// +// 1. When we are doing 1-D slicing (e.g. `tensor[:]`). +// Reason: we always return a shallow copy for expressions such as +// `tensor[:]` / `tensor[...]` / `tensor[:, :]`. (Note that for `tensor[:, +// :]`, we return an alias of `tensor` by doing the following: +// ``` +// Tensor sliced = impl::applySlicing(self, indices, tensorIndices, +// disable_slice_optimization, self_device, self_sizes); if +// (tensorIndices.empty()) { +// if (sliced.is_same(self)) { +// // ensure we return a shallow copy for things like x[...] +// sliced = at::alias(sliced); +// } +// return sliced; +// } +// ```) +// 2. When we are doing JIT tracing. +// Reason: JIT tracing needs the `self.slice(...)` call to properly trace the +// slice operation. + +// This mirrors `THPVariable_getitem` in +// torch/csrc/autograd/python_variable_indexing.cpp See NOTE [ Setting +// `disable_slice_optimization` when calling C++ tensor indexing functions from +// Python ] +static inline Tensor get_item( + const Tensor& self, + const ArrayRef& indices, + bool disable_slice_optimization = false) { + at::Device self_device = self.device(); + // NOTE [nested tensor size for indexing] + // nested tensor does not have a size (yet) so for now we represent its size + // as null may need to be changed after we reach a better solution for nested + // tensor size + c10::optional self_sizes = self.is_nested() + ? 
c10::optional(c10::nullopt) + : c10::optional(self.sym_sizes()); + + // handle simple types: integers, slices, none, ellipsis, bool + if (indices.size() == 1) { + const TensorIndex& index = indices[0]; + if (index.is_integer()) { + return impl::applySelect( + self, 0, index.integer(), 0, self_device, self_sizes); + } else if (index.is_slice()) { + return impl::applySlice( + self, + 0, + index.slice().start(), + index.slice().stop(), + index.slice().step(), + /*disable_slice_optimization=*/true, + self_device, + self_sizes); + } else if (index.is_none()) { + return self.unsqueeze(0); + } else if (index.is_ellipsis()) { + return at::alias(self); + } else if (index.is_boolean()) { + Tensor result = self.unsqueeze(0); + return dispatch_index( + result, + std::vector{impl::boolToIndexingTensor( + result, index.boolean(), self_device)}); + } + } + + std::vector tensorIndices; + Tensor sliced = impl::applySlicing( + self, + indices, + tensorIndices, + disable_slice_optimization, + self_device, + self_sizes); + if (tensorIndices.empty()) { + if (sliced.is_same(self)) { + // ensure we return a shallow copy for things like x[...] + sliced = at::alias(sliced); + } + return sliced; + } + + // indexing by tensors ("advanced" indexing) + return dispatch_index(sliced, std::move(tensorIndices)); +} + +// This mirrors `THPVariable_setitem` in +// torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is a +// Tensor" case See NOTE [ Setting `disable_slice_optimization` when calling C++ +// tensor indexing functions from Python ] +static inline void set_item( + const Tensor& self, + const ArrayRef& indices, + const Tensor& value, + bool disable_slice_optimization = false) { + at::Device self_device = self.device(); + SymIntArrayRef self_sizes = self.sym_sizes(); + + // handle simple types: integers, slices, ellipsis, bool + if (indices.size() == 1) { + const TensorIndex& index = indices[0]; + if (index.is_boolean() && !index.boolean()) { + // do nothing for false (technically we should check the size, but we + // don't have real 0-sized shapes. 
+ return; + } else if (index.is_ellipsis()) { + copy_to(self, value); + return; + } else if (index.is_none() || (index.is_boolean() && index.boolean())) { + copy_to(self.unsqueeze(0), value); + return; + } else if (index.is_integer()) { + copy_to( + impl::applySelect( + self, 0, index.integer(), 0, self_device, self_sizes), + value); + return; + } else if (index.is_slice()) { + copy_to( + impl::applySlice( + self, + 0, + index.slice().start(), + index.slice().stop(), + index.slice().step(), + /*disable_slice_optimization=*/disable_slice_optimization, + self_device, + self_sizes), + value); + return; + } + } + + std::vector tensorIndices; + Tensor sliced = impl::applySlicing( + self, + indices, + tensorIndices, + disable_slice_optimization, + self_device, + self_sizes); + if (tensorIndices.empty()) { + copy_to(sliced, value); + return; + } + + SymIntArrayRef valueSizes = value.sym_sizes(); + SymIntArrayRef slicedValueSizes = slicePrefix1sSize(valueSizes); + Tensor valuesSliced; + if (!valueSizes.equals(slicedValueSizes)) { + valuesSliced = value.view_symint(slicedValueSizes); + } else { + valuesSliced = value; + } + dispatch_index_put_(sliced, std::move(tensorIndices), valuesSliced); + return; +} + +} // namespace at::indexing diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h new file mode 100644 index 0000000000000000000000000000000000000000..8c5003a676d80fea79e7facab42a2818d9e2aa74 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h @@ -0,0 +1,137 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { + +class Tensor; + +namespace impl { + +// Use this to define the prototype for a meta function. There are two +// versions; one that takes one argument (just the operator name), or FUNC2 +// variant that takes two arguments (operator name and overload name). +// +// Example usage: +// +// TORCH_META_FUNC2(add, Tensor) ( +// const Tensor& self, const Tensor& other +// ) { +// ... compute sizes and options ... +// set_output(sizes, options); +// } +// +#define TORCH_META_FUNC(name) void structured_##name::meta +#define TORCH_META_FUNC2(name, overload) \ + void structured_##name##_##overload::meta + +// These are versions of TORCH_META_FUNC(2) that include a precompute_out struct +// as a return value. They should be used when the kernel in question has +// precomputed values declared in native_functions.yaml and the corresponding +// implementation should return an instance of the aforementioned struct. +#define TORCH_PRECOMPUTE_META_FUNC(name) \ + structured_##name::meta_return_ty structured_##name::meta +#define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \ + structured_##name##_##overload::meta_return_ty \ + structured_##name##_##overload::meta + +// Use this to create a precompute struct in a meta function. +#define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<> +#define TORCH_PRECOMPUTE_STRUCT2(name, overload) \ + structured_##name##_##overload::precompute_out<> + +// Use this to define the prototype for an implementation. This takes only +// one argument, which is the name of the dispatch key entry you're +// implementing. +// +// Example usage: +// +// TORCH_IMPL_FUNC(add_cpu) ( +// Tensor& result, const Tensor& self, const Tensor& other +// ) { +// ... do the actual implementation ... 
+// } +// +#define TORCH_IMPL_FUNC(name) void structured_##name::impl + +// Base class for all structured kernel classes. The set_output virtual +// method is varied depending whether or not the operator is +// functional/out/inplace, and could also be specialized for CPU/CUDA/etc +// (although presently it isn't). +// +// A notable subclass of this interface is TensorIteratorBase. +struct TORCH_API MetaBase { + MetaBase() = default; + MetaBase(const MetaBase&) = default; + MetaBase& operator=(const MetaBase&) = default; + MetaBase(MetaBase&&) noexcept = default; + MetaBase& operator=(MetaBase&&) noexcept = default; + virtual const Tensor& maybe_get_output(int64_t output_idx) = 0; + + // Note: [set_output_*] + // See: https://github.com/pytorch/pytorch/issues/69813 + // Whenever defining the output properties in the META function of a + // structured kernel (what was usually done with `set_output`), use one of + // these 3 variants, instead. In order to decide which variant to use, check + // the following decision tree: + // + // - Can the kernel you are going to implement support output tensors + // with arbitrary strides? + // | + // -- YES: `set_output_raw_strided` + // | + // -- NO: Should the output tensor strides be contiguous? + // | + // -- YES: `set_output_contiguous` + // | + // -- NO: `set_output_strided` + // + // Use this function whenever the kernel requires specific strides for the + // output. If `strides` does not match the given output strides, proxy outputs + // will be created and passed to the IMPL function. + virtual void set_output_strided( + int64_t output_idx, + IntArrayRef sizes, + IntArrayRef strides, + TensorOptions options, + DimnameList names = {}) { + TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented."); + } + + // Use this function whenever the kernel knows how to handle arbitrary strided + // outputs. This function has the same behavior as the old `set_output`: it + // will only re-stride if the given output was resized. + virtual void set_output_raw_strided( + int64_t output_idx, + IntArrayRef sizes, + IntArrayRef strides_hint, + TensorOptions options, + DimnameList names = {}) { + TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented."); + } + + // Use this function if the kernel requires contiguous strides. + // Alias for `set_output_strided`, but with contiguous strides. + void set_output_contiguous( + int64_t output_idx, + IntArrayRef sizes, + TensorOptions options, + DimnameList names = {}) { + auto strides = c10::contiguous_strides(sizes); + set_output_strided(output_idx, sizes, strides, options, names); + } + + // Returns a reference to an undefined tensor if there is no presupplied + // output + const Tensor& maybe_get_output() { + return maybe_get_output(0); + } + virtual ~MetaBase() = default; +}; + +} // namespace impl + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h new file mode 100644 index 0000000000000000000000000000000000000000..616efc14d2599d7f1a9f73f04fdf960e05bfcf2e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h @@ -0,0 +1,75 @@ +#pragma once + +#include + +namespace at::namedinference { + +// TensorName and TensorNames are wrappers around Dimname and DimnameList +// that contain helper functions to make writing name inference rules easier. 
+// +// A TensorName represents a Dimname associated with some DimnameList (from a +// Tensor). This encapsulates all the information that is needed to check if +// names *match* and to *unify* names. +// +// Definition: Two names in two tensors *match* if they are equal, or if at +// least one of them is a wildcard that can be *refined* to the other name. +// +// Definition: unify(name, other) fails if the names do not match. Otherwise, +// it returns the most refined of name and other. +// +// Here is an example of checking if two names match. +// tensor: Tensor[A, None] +// other: Tensor[A] +// +// Let's say we wish to check if tensor.names[-1] matches other.names[-1]. +// None (in tensor) cannot match A (in other) because if the None were refined +// to A, `tensor` would have duplicate names [A, A]. Therefore we need to check +// tensor.names [A, None] for the existence of A. +struct TORCH_API TensorName { + explicit TensorName(ArrayRef origin, int origin_idx) + : origin_(origin), + name_(origin[maybe_wrap_dim( + origin_idx, + static_cast(origin.size()))]), + origin_idx_(origin_idx) {} + + // op_name is only used for error reporting. + const TensorName& unify(const TensorName& other, const char* op_name) const; + Dimname toDimname() const; + + private: + ArrayRef origin_; + Dimname name_; + int origin_idx_; // A named tensor can have at most 64 dims. + + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const TensorName& tensorname); +}; + +using TensorNameVec = SmallVector; + +struct TORCH_API TensorNames { + explicit TensorNames(ArrayRef names); + + // Create TensorNames from names[start:end]. Each individual TensorName stores + // `names`, NOT names[start:end], because the original tensor's names are + // `names`. + explicit TensorNames(ArrayRef names, int64_t start, int64_t end); + + // op_name is only used for error reporting. 
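// For intuition (an illustrative reading of the match/unify definitions above,
// not text from the original header): unification is aligned from the
// rightmost dimension, as in broadcasting. E.g. unifying [A, None] with
// [None, B] from the right would refine each pair and give [A, B], while two
// different concrete names in the same slot (say A vs B) do not match, and
// unification fails with an error that mentions `op_name`.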
+ TensorNames& unifyFromRightInplace( + const TensorNames& other, + const char* op_name = "unify"); + void checkUnique(const char* op_name) const; + + void append(TensorName name); + std::vector toDimnameVec() const; + + private: + explicit TensorNames(TensorNameVec&& names) : names_(std::move(names)){}; + + TensorNameVec names_; +}; + +} // namespace at::namedinference diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOperators.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOperators.h new file mode 100644 index 0000000000000000000000000000000000000000..7567af4cbfe466843b1d48d78ffd259035cd62dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOperators.h @@ -0,0 +1,51 @@ +#pragma once + +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at { + +#define AT_FORALL_BINARY_OPS(_) \ + _(+, x.add(y), y.add(x)) \ + _(*, x.mul(y), y.mul(x)) \ + _(-, \ + x.sub(y), \ + ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).sub_(y)) \ + _(/, \ + x.div(y), \ + ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).div_(y)) \ + _(%, \ + x.remainder(y), \ + ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).remainder_(y)) \ + _(&, x.bitwise_and(y), y.bitwise_and(x)) \ + _(|, x.bitwise_or(y), y.bitwise_or(x)) \ + _(^, x.bitwise_xor(y), y.bitwise_xor(x)) \ + _(<, x.lt(y), y.gt(x)) \ + _(<=, x.le(y), y.ge(x)) \ + _(>, x.gt(y), y.lt(x)) \ + _(>=, x.ge(y), y.le(x)) \ + _(==, x.eq(y), y.eq(x)) \ + _(!=, x.ne(y), y.ne(x)) + +#define DEFINE_OPERATOR(op, body, reverse_scalar_body) \ + static inline Tensor operator op(const Tensor& x, const Tensor& y) { \ + return body; \ + } \ + static inline Tensor operator op(const Tensor& x, const Scalar& y) { \ + return body; \ + } \ + static inline Tensor operator op(const Scalar& x, const Tensor& y) { \ + return reverse_scalar_body; \ + } + +AT_FORALL_BINARY_OPS(DEFINE_OPERATOR) +#undef DEFINE_OPERATOR +#undef AT_FORALL_BINARY_OPS + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..b3edba8efdf726cea92059cb01e34ee25206482c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorSubclassLikeUtils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorSubclassLikeUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..a9a0b4ecdcf8b9e323d41f0b39941528a2f0b0cd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorSubclassLikeUtils.h @@ -0,0 +1,86 @@ +#pragma once +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at { + +// Note [Tensor-subclass-like Tensors] +// Tensor-subclass-like is defined as: +// - a Tensor subclass (via __torch_dispatch__ in Python or extending +// TensorImpl in C++) +// - anything else that shares the same perils as Tensor subclasses. +// For example, many Tensor subclasses do not have storage and meta Tensors +// do not have storage either, so meta Tensors belong here. +// +// We should ensure that PyTorch internals supports Tensor-subclass-like +// objects. 
In particular, Tensor-subclass-like objects struggle with two +// classes of operations that are problematic for Tensor subclasses: +// 1. Because some Tensor subclasses do not have storage, .item() or +// .data_ptr() calls are not good. +// 2. Certain in-place operations can eliminate the typing of the Tensor +// subclass. For example: +// >>> torch.zeros(input.sizes(), grad.options()).diag().copy_(input) +// If input is a Tensor subclass, then the above ends up either erroring out +// or returning a regular non-Tensor-subclass Tensor! + +constexpr auto kFunctorchWrappedTensors = DispatchKeySet( + {DispatchKey::FuncTorchGradWrapper, + DispatchKey::FuncTorchBatched, + DispatchKey::Functionalize}); + +constexpr auto kTensorSubclassLike = + kFunctorchWrappedTensors | + DispatchKeySet( + {// WARNING: DO NOT put combined backend component + functionality keys + // here, you will incorrectly always match on the functionality key + // no matter the backend component + DispatchKey::Batched, + DispatchKey::Sparse, + DispatchKey::SparseCsr, + DispatchKey::Python}) | + DispatchKeySet(BackendComponent::MetaBit); + +inline bool isTensorSubclassLike(const Tensor& tensor) { + if (c10::impl::dispatch_mode_enabled()) + return true; + auto key_set = tensor.unsafeGetTensorImpl()->key_set(); + return !(key_set & kTensorSubclassLike).empty(); +} + +inline bool areAnyTensorSubclassLike(TensorList tensors) { + if (c10::impl::dispatch_mode_enabled()) + return true; + return std::any_of(tensors.begin(), tensors.end(), isTensorSubclassLike); +} + +inline bool areAnyOptionalTensorSubclassLike( + const c10::List>& tensors) { + if (c10::impl::dispatch_mode_enabled()) + return true; + return std::any_of( + tensors.begin(), tensors.end(), [](const optional& opt_tensor) { + return ( + opt_tensor.has_value() && isTensorSubclassLike(opt_tensor.value())); + }); +} + +// Helper function to deal testing truthfulness of a scalar tensor +// in a Composite Compliant manner. +// NOTE: This function expects a scalar tensor of boolean dtype. +// Eg. 
+// Non-Composite Compliant Pattern : (t == 0).all().item() +// Composite Compliant Patter : is_salar_tensor_true((t == 0).all()) +inline bool is_scalar_tensor_true(const Tensor& t) { + TORCH_INTERNAL_ASSERT(t.dim() == 0) + TORCH_INTERNAL_ASSERT(t.scalar_type() == kBool) + return at::equal(t, t.new_ones({}, t.options())); +} + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h new file mode 100644 index 0000000000000000000000000000000000000000..c45de86db3abeffe22cb8db559f602d88b35be9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include + +namespace at::impl { + +struct TORCH_API ThreadLocalPythonObjects { + static void set(const std::string& key, std::shared_ptr value); + static const std::shared_ptr& get(const std::string& key); + static bool contains(const std::string& key); + + static const ThreadLocalPythonObjects& get_state(); + static void set_state(ThreadLocalPythonObjects state); + + private: + std::unordered_map> obj_dict_; +}; + +} // namespace at::impl diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Version.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Version.h new file mode 100644 index 0000000000000000000000000000000000000000..706da58a5da01c35fda7f2c6374c8f5868f1b642 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Version.h @@ -0,0 +1,18 @@ +#include + +namespace at { + +/// Returns a detailed string describing the configuration PyTorch. +TORCH_API std::string show_config(); + +TORCH_API std::string get_mkl_version(); + +TORCH_API std::string get_mkldnn_version(); + +TORCH_API std::string get_openmp_version(); + +TORCH_API std::string get_cxx_flags(); + +TORCH_API std::string get_cpu_capability(); + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/VmapGeneratedPlumbing.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/VmapGeneratedPlumbing.h new file mode 100644 index 0000000000000000000000000000000000000000..31c73b1a8b11e0dd14fc110572caba85eb75c69b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/VmapGeneratedPlumbing.h @@ -0,0 +1,32614 @@ + +#pragma once +#include +#include + +namespace at { namespace functorch { + +template +at::Tensor _cast_Byte_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Byte::call(self, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Char_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, 
cur_level)) { + return at::_ops::_cast_Char::call(self, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Double_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Double::call(self, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Float_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Float::call(self, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Int_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Int::call(self, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Long_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Long::call(self, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Short_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Short::call(self, non_blocking); + } + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Half_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Half::call(self, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _backward_generated_plumbing(const at::Tensor & self, at::TensorList inputs, const c10::optional & gradient, c10::optional retain_graph, bool create_graph) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(inputs, cur_level) && !isBatchedAtLevel(gradient, cur_level)) { + return at::_ops::_backward::call(self, inputs, gradient, retain_graph, create_graph); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional gradient_value; + optional gradient_bdim; + if (gradient) { + std::tie(gradient_value, gradient_bdim) = unwrapTensorAtLevel(gradient.value(), cur_level); + } + batch_rule(self_value, self_bdim, inputs, gradient_value, gradient_bdim, retain_graph, create_graph); +} +template +void set_data_generated_plumbing(at::Tensor & self, const at::Tensor & new_data) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(new_data, cur_level)) { + return at::_ops::set_data::call(self, new_data); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor new_data_value; + optional new_data_bdim; + std::tie(new_data_value, new_data_bdim) = unwrapTensorAtLevel(new_data, cur_level); + batch_rule(self_value, self_bdim, new_data_value, new_data_bdim); +} +template +at::Tensor data_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::data::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & requires_grad__generated_plumbing(at::Tensor & self, bool requires_grad) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::requires_grad_::call(self, requires_grad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, requires_grad); + return self; +} +template +void retain_grad_generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::retain_grad::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); +} +template +at::Tensor _fw_primal_generated_plumbing(const at::Tensor & self, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fw_primal::call(self, level); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, level); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _make_dual_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) { + return at::_ops::_make_dual::call(primal, tangent, level); + } + Tensor primal_value; + optional primal_bdim; + std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level); + Tensor tangent_value; + optional tangent_bdim; + std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level); + auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _unpack_dual_generated_plumbing(const at::Tensor & dual, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dual, cur_level)) { + return at::_ops::_unpack_dual::call(dual, level); + } + Tensor dual_value; + optional dual_bdim; + std::tie(dual_value, dual_bdim) = unwrapTensorAtLevel(dual, cur_level); + auto results = batch_rule(dual_value, dual_bdim, level); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _new_zeros_with_same_feature_meta_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t 
self_num_batch_dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, self_num_batch_dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rename_generated_plumbing(const at::Tensor & self, c10::optional names) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rename::call(self, names); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, names); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor align_to_generated_plumbing(const at::Tensor & self, at::DimnameList names) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::align_to::call(self, names); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, names); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor align_to_ellipsis_idx_generated_plumbing(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::align_to_ellipsis_idx::call(self, order, ellipsis_idx); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, order, ellipsis_idx); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor align_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::align_as::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; 
+ optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector align_tensors_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::align_tensors::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _assert_async_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_assert_async::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); +} +template +void _assert_async_msg_generated_plumbing(const at::Tensor & self, c10::string_view assert_msg) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_assert_async_msg::call(self, assert_msg); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, assert_msg); +} +template +at::Tensor _functional_assert_scalar_generated_plumbing(const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dep_token, cur_level)) { + return at::_ops::_functional_assert_scalar::call(self, assert_msg, dep_token); + } + Tensor dep_token_value; + optional dep_token_bdim; + std::tie(dep_token_value, dep_token_bdim) = unwrapTensorAtLevel(dep_token, cur_level); + auto results = batch_rule(self, assert_msg, dep_token_value, dep_token_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _functional_assert_async_msg_generated_plumbing(const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dep_token, cur_level)) { + return at::_ops::_functional_assert_async_msg::call(self, assert_msg, dep_token); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor dep_token_value; + optional dep_token_bdim; + std::tie(dep_token_value, 
dep_token_bdim) = unwrapTensorAtLevel(dep_token, cur_level); + auto results = batch_rule(self_value, self_bdim, assert_msg, dep_token_value, dep_token_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _assert_tensor_metadata_generated_plumbing(const at::Tensor & a, at::OptionalSymIntArrayRef size, at::OptionalSymIntArrayRef stride, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(a, cur_level)) { + return at::_ops::_assert_tensor_metadata::call(a, size, stride, dtype); + } + Tensor a_value; + optional a_bdim; + std::tie(a_value, a_bdim) = unwrapTensorAtLevel(a, cur_level); + batch_rule(a_value, a_bdim, size, stride, dtype); +} +template +at::Tensor _functional_sym_constrain_range_generated_plumbing(const at::Scalar & size, c10::optional min, c10::optional max, const at::Tensor & dep_token) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dep_token, cur_level)) { + return at::_ops::_functional_sym_constrain_range::call(size, min, max, dep_token); + } + Tensor dep_token_value; + optional dep_token_bdim; + std::tie(dep_token_value, dep_token_bdim) = unwrapTensorAtLevel(dep_token, cur_level); + auto results = batch_rule(size, min, max, dep_token_value, dep_token_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _functional_sym_constrain_range_for_size_generated_plumbing(const at::Scalar & size, c10::optional min, c10::optional max, const at::Tensor & dep_token) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dep_token, cur_level)) { + return at::_ops::_functional_sym_constrain_range_for_size::call(size, min, max, dep_token); + } + Tensor dep_token_value; + optional dep_token_bdim; + std::tie(dep_token_value, dep_token_bdim) = unwrapTensorAtLevel(dep_token, cur_level); + auto results = batch_rule(size, min, max, dep_token_value, dep_token_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor refine_names_generated_plumbing(const at::Tensor & self, at::DimnameList names) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::refine_names::call(self, names); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, names); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _cudnn_ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + 
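// Same generated-plumbing shape as the functions above: temporarily exclude
// the FuncTorchBatched dispatch key, look up the current vmap level, and fall
// through to the plain at::_ops call when no tensor argument is batched at
// that level; otherwise each batched tensor is unwrapped into a
// (value, bdim) pair, the batch rule is invoked on the unwrapped arguments,
// and the results are re-wrapped with makeBatched at the current level.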
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) { + return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); + } + Tensor log_probs_value; + optional log_probs_bdim; + std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level); + Tensor targets_value; + optional targets_bdim; + std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level); + auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, deterministic, zero_infinity); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _cudnn_ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) { + return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); + } + Tensor log_probs_value; + optional log_probs_bdim; + std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level); + Tensor targets_value; + optional targets_bdim; + std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level); + Tensor input_lengths_value; + optional input_lengths_bdim; + std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level); + Tensor target_lengths_value; + optional target_lengths_bdim; + std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level); + auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, deterministic, zero_infinity); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _cudnn_rnn_flatten_weight_generated_plumbing(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight_arr, cur_level)) { + return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + } + + auto results = 
batch_rule(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _cudnn_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) { + return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + optional weight_buf_value; + optional weight_buf_bdim; + if (weight_buf) { + std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf.value(), cur_level); + } + optional cx_value; + optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + optional dropout_state_value; + optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple> _cudnn_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); 
+ if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) { + return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_buf_value; + optional weight_buf_bdim; + std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor reserve_value; + optional reserve_bdim; + std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level); + optional cx_value; + optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + optional grad_output_value; + optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + optional dropout_state_value; + optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple _fused_dropout_generated_plumbing(const at::Tensor & self, double p, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fused_dropout::call(self, p, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto 
results = batch_rule(self_value, self_bdim, p, generator);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+// NOTE: Each generated plumbing function follows the same pattern: unbatched
+// inputs fall through to the regular at::_ops kernel; otherwise every batched
+// Tensor is unwrapped into a (value, bdim) pair, the batch rule is invoked,
+// and its outputs are re-wrapped at the current vmap level.
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _masked_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, double scale) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::_masked_scale::call(self, mask, scale);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  Tensor mask_value;
+  optional<int64_t> mask_bdim;
+  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, scale);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> native_dropout_generated_plumbing(const at::Tensor & input, double p, c10::optional<bool> train) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::native_dropout::call(input, p, train);
+  }
+  Tensor input_value;
+  optional<int64_t> input_bdim;
+  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, p, train);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor native_dropout_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::native_dropout_backward::call(grad_output, mask, scale);
+  }
+  Tensor grad_output_value;
+  optional<int64_t> grad_output_bdim;
+  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
+  Tensor mask_value;
+  optional<int64_t> mask_bdim;
+  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, scale);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw_generated_plumbing(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(quasi, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
+    return at::_ops::_sobol_engine_draw::call(quasi, n, sobolstate, dimension, num_generated, dtype);
+  }
+  Tensor quasi_value;
+  optional<int64_t> quasi_bdim;
+
std::tie(quasi_value, quasi_bdim) = unwrapTensorAtLevel(quasi, cur_level); + Tensor sobolstate_value; + optional sobolstate_bdim; + std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level); + auto results = batch_rule(quasi_value, quasi_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated, dtype); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor & _sobol_engine_ff__generated_plumbing(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) { + return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor sobolstate_value; + optional sobolstate_bdim; + std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level); + batch_rule(self_value, self_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated); + return self; +} +template +at::Tensor & _sobol_engine_scramble__generated_plumbing(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(ltm, cur_level)) { + return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor ltm_value; + optional ltm_bdim; + std::tie(ltm_value, ltm_bdim) = unwrapTensorAtLevel(ltm, cur_level); + batch_rule(self_value, self_bdim, ltm_value, ltm_bdim, dimension); + return self; +} +template +at::Tensor & _sobol_engine_initialize_state__generated_plumbing(at::Tensor & self, int64_t dimension) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sobol_engine_initialize_state_::call(self, dimension); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dimension); + return self; +} +template +at::Tensor _reshape_from_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & shape) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(shape, cur_level)) { + return at::_ops::_reshape_from_tensor::call(self, shape); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + Tensor shape_value; + optional shape_bdim; + std::tie(shape_value, shape_bdim) = unwrapTensorAtLevel(shape, cur_level); + auto results = batch_rule(self_value, self_bdim, shape_value, shape_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _shape_as_tensor_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_shape_as_tensor::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor dropout_generated_plumbing(const at::Tensor & input, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::dropout::call(input, p, train); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & dropout__generated_plumbing(at::Tensor & self, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::dropout_::call(self, p, train); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, train); + return self; +} +template +at::Tensor feature_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::feature_dropout::call(input, p, train); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & feature_dropout__generated_plumbing(at::Tensor & self, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::feature_dropout_::call(self, p, train); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + 
batch_rule(self_value, self_bdim, p, train); + return self; +} +template +at::Tensor alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::alpha_dropout::call(input, p, train); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::alpha_dropout_::call(self, p, train); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, train); + return self; +} +template +at::Tensor feature_alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::feature_alpha_dropout::call(input, p, train); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & feature_alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::feature_alpha_dropout_::call(self, p, train); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, train); + return self; +} +template +at::Tensor abs_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::abs::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & abs__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::abs_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor absolute_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::absolute::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & absolute__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::absolute_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor angle_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::angle::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor view_as_real_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::view_as_real::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor view_as_complex_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::view_as_complex::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sgn_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sgn::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & sgn__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sgn_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor chalf_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::chalf::call(self, memory_format);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, memory_format);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor real_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::real::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor imag_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::imag::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _conj_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_conj::call(self);
+  }
+  Tensor self_value;
+
optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conj_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::conj::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _conj_physical_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_conj_physical::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conj_physical_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::conj_physical::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & conj_physical__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::conj_physical_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor resolve_conj_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::resolve_conj::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor resolve_neg_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::resolve_neg::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _neg_view_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_neg_view::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor acos_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::acos::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & acos__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::acos_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arccos_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arccos::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arccos__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arccos_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor 
avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::avg_pool1d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor adaptive_avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adaptive_avg_pool1d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple adaptive_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adaptive_max_pool1d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor add_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::add_Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & add__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::add__Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return self; +} +template +at::Tensor _add_relu_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_add_relu_Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _add_relu__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_add_relu__Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return self; +} +template +at::Tensor _add_relu_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_add_relu_Scalar::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _add_relu__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_add_relu__Scalar::call(self, other, alpha); + 
} + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other, alpha); + return self; +} +template +at::Tensor add_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::add_Scalar::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & add__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::add__Scalar::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other, alpha); + return self; +} +template +at::Tensor addmv_generated_plumbing(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) { + return at::_ops::addmv::call(self, mat, vec, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat_value; + optional mat_bdim; + std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level); + Tensor vec_value; + optional vec_bdim; + std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level); + auto results = batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & addmv__generated_plumbing(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) { + return at::_ops::addmv_::call(self, mat, vec, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat_value; + optional mat_bdim; + std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level); + Tensor vec_value; + optional vec_bdim; + 
std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level); + batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha); + return self; +} +template +at::Tensor addr_generated_plumbing(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) { + return at::_ops::addr::call(self, vec1, vec2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor vec1_value; + optional vec1_bdim; + std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level); + Tensor vec2_value; + optional vec2_bdim; + std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level); + auto results = batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & addr__generated_plumbing(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) { + return at::_ops::addr_::call(self, vec1, vec2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor vec1_value; + optional vec1_bdim; + std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level); + Tensor vec2_value; + optional vec2_bdim; + std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level); + batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha); + return self; +} +template +at::Tensor affine_grid_generator_generated_plumbing(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(theta, cur_level)) { + return at::_ops::affine_grid_generator::call(theta, size, align_corners); + } + Tensor theta_value; + optional theta_bdim; + std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level); + auto results = batch_rule(theta_value, theta_bdim, size, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level)) { + return 
at::_ops::affine_grid_generator_backward::call(grad, size, align_corners); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + auto results = batch_rule(grad_value, grad_bdim, size, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _is_all_true_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_is_all_true::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _is_any_true_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_is_any_true::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_check_tensor_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_check_tensor::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_functorch_fallback_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_test_functorch_fallback::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor all_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::all_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor all_dims_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::all_dims::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor all_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::all_dimname::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor any_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::any_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor any_dims_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::any_dims::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor any_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, 
cur_level)) {
+    return at::_ops::any_dimname::call(self, dim, keepdim);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _dim_arange_generated_plumbing(const at::Tensor & like, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(like, cur_level)) {
+    return at::_ops::_dim_arange::call(like, dim);
+  }
+  Tensor like_value;
+  optional<int64_t> like_bdim;
+  std::tie(like_value, like_bdim) = unwrapTensorAtLevel(like, cur_level);
+  auto results = batch_rule(like_value, like_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor argmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::argmax::call(self, dim, keepdim);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor argmin_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::argmin::call(self, dim, keepdim);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor acosh_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::acosh::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & acosh__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::acosh_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+
batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor arccosh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arccosh::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & arccosh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arccosh_::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor asinh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::asinh::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & asinh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::asinh_::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor arcsinh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arcsinh::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & arcsinh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arcsinh_::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + 
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor atanh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atanh::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & atanh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atanh_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arctanh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arctanh::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arctanh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arctanh_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor as_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::as_strided::call(self, size, stride, storage_offset); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor asin_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::asin::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & asin__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::asin_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arcsin_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arcsin::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arcsin__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arcsin_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor atan_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atan::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & atan__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atan_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arctan_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arctan::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arctan__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arctan_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor atleast_1d_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atleast_1d::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector atleast_1d_Sequence_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::atleast_1d_Sequence::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor atleast_2d_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atleast_2d::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector atleast_2d_Sequence_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::atleast_2d_Sequence::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor atleast_3d_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atleast_3d::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector atleast_3d_Sequence_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::atleast_3d_Sequence::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor baddbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) { + return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor batch1_value; + optional batch1_bdim; + std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level); + Tensor batch2_value; + optional batch2_bdim; + std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level); + auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & baddbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) { + return at::_ops::baddbmm_::call(self, batch1, batch2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor batch1_value; + optional batch1_bdim; + std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level); + Tensor batch2_value; + optional batch2_bdim; + std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level); + batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha); + return self; +} +template +at::Tensor batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & 
running_var, bool training, double momentum, double eps, bool cudnn_enabled) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps, cudnn_enabled); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(var, cur_level)) { + return at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor var_value; + optional var_bdim; + std::tie(var_value, var_bdim) = unwrapTensorAtLevel(var, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, var_value, var_bdim, eps, output_scale, output_zero_point); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple 
_batch_norm_impl_index_backward_generated_plumbing(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var_transform, bool train, double eps, ::std::array output_mask, const at::Tensor & reservedSpace) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var_transform, cur_level) && !isBatchedAtLevel(reservedSpace, cur_level)) { + return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor reservedSpace_value; + optional reservedSpace_bdim; + std::tie(reservedSpace_value, reservedSpace_bdim) = unwrapTensorAtLevel(reservedSpace, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + optional save_mean_value; + optional save_mean_bdim; + if (save_mean) { + std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level); + } + optional save_var_transform_value; + optional save_var_transform_bdim; + if (save_var_transform) { + std::tie(save_var_transform_value, save_var_transform_bdim) = unwrapTensorAtLevel(save_var_transform.value(), cur_level); + } + auto results = batch_rule(impl_index, input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_transform_value, save_var_transform_bdim, train, eps, output_mask, reservedSpace_value, reservedSpace_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor bernoulli_generated_plumbing(const at::Tensor & self, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::bernoulli::call(self, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bernoulli__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & p, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) { + return at::_ops::bernoulli__Tensor::call(self, p, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor p_value; + optional p_bdim; + std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level); + batch_rule(self_value, self_bdim, p_value, p_bdim, generator); + return self; +} +template +at::Tensor & bernoulli__float_generated_plumbing(at::Tensor & self, double p, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bernoulli__float::call(self, p, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, generator); + return self; +} +template +at::Tensor bernoulli_p_generated_plumbing(const at::Tensor & self, double p, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bernoulli_p::call(self, p, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bilinear_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::bilinear::call(input1, input2, weight, bias); + } + Tensor input1_value; + optional input1_bdim; + std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level); + Tensor input2_value; + optional input2_bdim; + std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + 
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, weight_value, weight_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor binary_cross_entropy_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::binary_cross_entropy::call(self, target, weight, reduction); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor binary_cross_entropy_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::binary_cross_entropy_backward::call(grad_output, self, target, weight, reduction); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor binary_cross_entropy_with_logits_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && 
!isBatchedAtLevel(pos_weight, cur_level)) { + return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional pos_weight_value; + optional pos_weight_bdim; + if (pos_weight) { + std::tie(pos_weight_value, pos_weight_bdim) = unwrapTensorAtLevel(pos_weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, pos_weight_value, pos_weight_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bincount_generated_plumbing(const at::Tensor & self, const c10::optional & weights, int64_t minlength) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weights, cur_level)) { + return at::_ops::bincount::call(self, weights, minlength); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weights_value; + optional weights_bdim; + if (weights) { + std::tie(weights_value, weights_bdim) = unwrapTensorAtLevel(weights.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weights_value, weights_bdim, minlength); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_not_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_not::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bitwise_not__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_not_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor copysign_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, 
cur_level)) { + return at::_ops::copysign_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & copysign__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::copysign__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor copysign_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::copysign_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & copysign__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::copysign__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor _lazy_clone_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_lazy_clone::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logical_not_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logical_not::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & logical_not__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logical_not_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor logical_xor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::logical_xor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & logical_xor__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::logical_xor_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor logical_and_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::logical_and::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & logical_and__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::logical_and_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor logical_or_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::logical_or::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & logical_or__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::logical_or_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::bmm::call(self, mat2); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector broadcast_tensors_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + 
return at::_ops::broadcast_tensors::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor broadcast_to_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::broadcast_to::call(self, size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_broadcast_to_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_broadcast_to::call(self, size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cat_generated_plumbing(const at::ITensorListRef & tensors, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::cat::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::cat_names::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor concat_generated_plumbing(at::TensorList tensors, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::concat::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor concat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, 
cur_level)) { + return at::_ops::concat_names::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor concatenate_generated_plumbing(at::TensorList tensors, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::concatenate::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor concatenate_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::concatenate_names::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor block_diag_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::block_diag::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ceil_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ceil::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & ceil__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ceil_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor chain_matmul_generated_plumbing(at::TensorList matrices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(matrices, cur_level)) { + return at::_ops::chain_matmul::call(matrices); + } + + auto results = batch_rule(matrices); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} 
+template +::std::vector unsafe_chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unsafe_chunk::call(self, chunks, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, chunks, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::chunk::call(self, chunks, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, chunks, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector tensor_split_sections_generated_plumbing(const at::Tensor & self, c10::SymInt sections, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tensor_split_sections::call(self, sections, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sections, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector tensor_split_indices_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tensor_split_indices::call(self, indices, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector tensor_split_tensor_indices_or_sections_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor_indices_or_sections, cur_level)) { + return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim); + } + Tensor self_value; + optional self_bdim; + 
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor tensor_indices_or_sections_value; + optional tensor_indices_or_sections_bdim; + std::tie(tensor_indices_or_sections_value, tensor_indices_or_sections_bdim) = unwrapTensorAtLevel(tensor_indices_or_sections, cur_level); + auto results = batch_rule(self_value, self_bdim, tensor_indices_or_sections_value, tensor_indices_or_sections_bdim, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor clamp_generated_plumbing(const at::Tensor & self, const c10::optional & min, const c10::optional & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clamp::call(self, min, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, min, max); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor clamp_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional & min, const c10::optional & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) { + return at::_ops::clamp_Tensor::call(self, min, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional min_value; + optional min_bdim; + if (min) { + std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level); + } + optional max_value; + optional max_bdim; + if (max) { + std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & clamp__generated_plumbing(at::Tensor & self, const c10::optional & min, const c10::optional & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clamp_::call(self, min, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, min, max); + return self; +} +template +at::Tensor & clamp__Tensor_generated_plumbing(at::Tensor & self, const c10::optional & min, const c10::optional & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) { + return at::_ops::clamp__Tensor::call(self, min, max); + } + Tensor self_value; + optional self_bdim; + 
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional min_value; + optional min_bdim; + if (min) { + std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level); + } + optional max_value; + optional max_bdim; + if (max) { + std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level); + } + batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim); + return self; +} +template +at::Tensor clamp_max_generated_plumbing(const at::Tensor & self, const at::Scalar & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clamp_max::call(self, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, max); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor clamp_max_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) { + return at::_ops::clamp_max_Tensor::call(self, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor max_value; + optional max_bdim; + std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level); + auto results = batch_rule(self_value, self_bdim, max_value, max_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & clamp_max__generated_plumbing(at::Tensor & self, const at::Scalar & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clamp_max_::call(self, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, max); + return self; +} +template +at::Tensor & clamp_max__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) { + return at::_ops::clamp_max__Tensor::call(self, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor max_value; + optional max_bdim; + std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level); + batch_rule(self_value, self_bdim, max_value, max_bdim); + return self; +} +template +at::Tensor clamp_min_generated_plumbing(const at::Tensor & self, const at::Scalar & min) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clamp_min::call(self, min); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, min); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor clamp_min_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & min) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) { + return at::_ops::clamp_min_Tensor::call(self, min); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor min_value; + optional min_bdim; + std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level); + auto results = batch_rule(self_value, self_bdim, min_value, min_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & clamp_min__generated_plumbing(at::Tensor & self, const at::Scalar & min) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clamp_min_::call(self, min); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, min); + return self; +} +template +at::Tensor & clamp_min__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & min) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) { + return at::_ops::clamp_min__Tensor::call(self, min); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor min_value; + optional min_bdim; + std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level); + batch_rule(self_value, self_bdim, min_value, min_bdim); + return self; +} +template +at::Tensor clip_generated_plumbing(const at::Tensor & self, const c10::optional & min, const c10::optional & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clip::call(self, min, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, min, max); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor clip_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional & min, 
const c10::optional & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) { + return at::_ops::clip_Tensor::call(self, min, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional min_value; + optional min_bdim; + if (min) { + std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level); + } + optional max_value; + optional max_bdim; + if (max) { + std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & clip__generated_plumbing(at::Tensor & self, const c10::optional & min, const c10::optional & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clip_::call(self, min, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, min, max); + return self; +} +template +at::Tensor & clip__Tensor_generated_plumbing(at::Tensor & self, const c10::optional & min, const c10::optional & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) { + return at::_ops::clip__Tensor::call(self, min, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional min_value; + optional min_bdim; + if (min) { + std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level); + } + optional max_value; + optional max_bdim; + if (max) { + std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level); + } + batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim); + return self; +} +template +at::Tensor complex_generated_plumbing(const at::Tensor & real, const at::Tensor & imag) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(real, cur_level) && !isBatchedAtLevel(imag, cur_level)) { + return at::_ops::complex::call(real, imag); + } + Tensor real_value; + optional real_bdim; + std::tie(real_value, real_bdim) = unwrapTensorAtLevel(real, cur_level); + Tensor imag_value; + optional imag_bdim; + std::tie(imag_value, imag_bdim) = unwrapTensorAtLevel(imag, cur_level); + auto results = batch_rule(real_value, real_bdim, imag_value, imag_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
polar_generated_plumbing(const at::Tensor & abs, const at::Tensor & angle) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(abs, cur_level) && !isBatchedAtLevel(angle, cur_level)) { + return at::_ops::polar::call(abs, angle); + } + Tensor abs_value; + optional abs_bdim; + std::tie(abs_value, abs_bdim) = unwrapTensorAtLevel(abs, cur_level); + Tensor angle_value; + optional angle_bdim; + std::tie(angle_value, angle_bdim) = unwrapTensorAtLevel(angle, cur_level); + auto results = batch_rule(abs_value, abs_bdim, angle_value, angle_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor constant_pad_nd_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::constant_pad_nd::call(self, pad, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, pad, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor contiguous_generated_plumbing(const at::Tensor & self, at::MemoryFormat memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::contiguous::call(self, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, 
transposed, output_padding, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple convolution_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor convolution_overrideable_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple convolution_backward_overrideable_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool 
transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _convolution_deprecated_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_convolution_deprecated::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _convolution_mode_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _convolution_double_backward_generated_plumbing(const c10::optional & ggI, const c10::optional & ggW, const c10::optional & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(ggI, cur_level) && !isBatchedAtLevel(ggW, cur_level) && !isBatchedAtLevel(ggb, cur_level) && !isBatchedAtLevel(gO, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask); + } + Tensor gO_value; + optional gO_bdim; + std::tie(gO_value, gO_bdim) = unwrapTensorAtLevel(gO, cur_level); + Tensor 
weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional ggI_value; + optional ggI_bdim; + if (ggI) { + std::tie(ggI_value, ggI_bdim) = unwrapTensorAtLevel(ggI.value(), cur_level); + } + optional ggW_value; + optional ggW_bdim; + if (ggW) { + std::tie(ggW_value, ggW_bdim) = unwrapTensorAtLevel(ggW.value(), cur_level); + } + optional ggb_value; + optional ggb_bdim; + if (ggb) { + std::tie(ggb_value, ggb_bdim) = unwrapTensorAtLevel(ggb.value(), cur_level); + } + auto results = batch_rule(ggI_value, ggI_bdim, ggW_value, ggW_bdim, ggb_value, ggb_bdim, gO_value, gO_bdim, weight_value, weight_bdim, self_value, self_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor conv1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv2d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv2d::call(input, weight, bias, stride, padding, dilation, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, 
weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv3d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv1d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv2d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv2d_padding::call(input, weight, bias, stride, padding, dilation, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = 
unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv3d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv_tbc_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv_tbc::call(self, weight, bias, pad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor bias_value; + optional bias_bdim; + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple conv_tbc_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv_tbc_backward::call(self, 
input, weight, bias, pad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor bias_value; + optional bias_bdim; + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(self_value, self_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor conv_transpose1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv_transpose1d::call(input, weight, bias, stride, padding, output_padding, groups, dilation); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv_transpose2d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, 
groups, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv_transpose3d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor copy_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::copy::call(self, src, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & copy__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::copy_::call(self, src, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking); + return self; +} +template +at::Tensor _copy_from_generated_plumbing(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) { + return at::_ops::_copy_from::call(self, dst, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor dst_value; + optional dst_bdim; + std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level); + auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _copy_from_and_resize_generated_plumbing(const at::Tensor & self, const at::Tensor & dst) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) { + return at::_ops::_copy_from_and_resize::call(self, dst); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor dst_value; + optional dst_bdim; + std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level); + auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cos_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cos::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & cos__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cos_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor cosh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cosh::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & cosh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cosh_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cosine_embedding_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
+    return at::_ops::cosine_embedding_loss::call(input1, input2, target, margin, reduction);
+  }
+  Tensor input1_value;
+  optional<int64_t> input1_bdim;
+  std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
+  Tensor input2_value;
+  optional<int64_t> input2_bdim;
+  std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
+  Tensor target_value;
+  optional<int64_t> target_bdim;
+  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
+  auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor count_nonzero_dim_IntList_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::count_nonzero_dim_IntList::call(self, dim);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor count_nonzero_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::count_nonzero::call(self, dim);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cov_generated_plumbing(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(fweights, cur_level) && !isBatchedAtLevel(aweights, cur_level)) {
+    return at::_ops::cov::call(self, correction, fweights, aweights);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ optional fweights_value; + optional fweights_bdim; + if (fweights) { + std::tie(fweights_value, fweights_bdim) = unwrapTensorAtLevel(fweights.value(), cur_level); + } + optional aweights_value; + optional aweights_bdim; + if (aweights) { + std::tie(aweights_value, aweights_bdim) = unwrapTensorAtLevel(aweights.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, correction, fweights_value, fweights_bdim, aweights_value, aweights_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor corrcoef_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::corrcoef::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cudnn_affine_grid_generator_generated_plumbing(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(theta, cur_level)) { + return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W); + } + Tensor theta_value; + optional theta_bdim; + std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level); + auto results = batch_rule(theta_value, theta_bdim, N, C, H, W); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cudnn_affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level)) { + return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + auto results = batch_rule(grad_value, grad_bdim, N, C, H, W); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple cudnn_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, 
epsilon); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple cudnn_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const at::Tensor & reserveSpace) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserveSpace, cur_level)) { + return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor reserveSpace_value; + optional reserveSpace_bdim; + std::tie(reserveSpace_value, reserveSpace_bdim) = unwrapTensorAtLevel(reserveSpace, cur_level); + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + optional save_mean_value; + optional save_mean_bdim; + if (save_mean) { + std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level); + } + optional save_var_value; + optional 
save_var_bdim; + if (save_var) { + std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon, reserveSpace_value, reserveSpace_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor cudnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cudnn_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mps_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple mps_convolution_transpose_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor cudnn_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, 
padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cudnn_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor z_value; + optional z_bdim; + std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cudnn_grid_sampler_generated_plumbing(const at::Tensor & self, const at::Tensor & grid) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::cudnn_grid_sampler::call(self, grid); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple cudnn_grid_sampler_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level) && !isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(self_value, self_bdim, 
grid_value, grid_bdim, grad_output_value, grad_output_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple cummax_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cummax::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple cummax_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cummax_dimname::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +void _cummax_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::_cummax_helper::call(self, values, indices, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim); +} +template +::std::tuple cummin_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cummin::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template 
<typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> cummin_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cummin_dimname::call(self, dim);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _cummin_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::_cummin_helper::call(self, values, indices, dim);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  Tensor values_value;
+  optional<int64_t> values_bdim;
+  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
+  Tensor indices_value;
+  optional<int64_t> indices_bdim;
+  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
+  batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cummaxmin_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
+  }
+  Tensor grad_value;
+  optional<int64_t> grad_bdim;
+  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
+  Tensor input_value;
+  optional<int64_t> input_bdim;
+  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
+  Tensor indices_value;
+  optional<int64_t> indices_bdim;
+  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, indices_value, indices_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cumprod_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumprod::call(self, dim, dtype);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim,
dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & cumprod__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cumprod_::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dim, dtype); + return self; +} +template +at::Tensor cumprod_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cumprod_dimname::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & cumprod__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cumprod__dimname::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dim, dtype); + return self; +} +template +at::Tensor cumprod_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::cumprod_backward::call(grad, input, dim, output); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, dim, output_value, output_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cumsum_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cumsum::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & cumsum__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cumsum_::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dim, dtype); + return self; +} +template +at::Tensor cumsum_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cumsum_dimname::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & cumsum__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cumsum__dimname::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dim, dtype); + return self; +} +template +at::Tensor cumulative_trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) { + return at::_ops::cumulative_trapezoid_x::call(y, x, dim); + } + Tensor y_value; + optional y_bdim; + std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level); + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cumulative_trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(y, cur_level)) { + return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim); + } + Tensor y_value; + optional y_bdim; + std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level); + auto results = batch_rule(y_value, y_bdim, dx, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ctc_loss_IntList_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) { + return at::_ops::ctc_loss_IntList::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); + } + Tensor log_probs_value; + optional log_probs_bdim; + std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level); + Tensor targets_value; + optional targets_bdim; + std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level); + auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, reduction, zero_infinity); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) { + return at::_ops::ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); + } + Tensor log_probs_value; + optional log_probs_bdim; + std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level); + Tensor targets_value; + optional targets_bdim; + std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level); + Tensor input_lengths_value; + optional input_lengths_bdim; + std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level); + Tensor target_lengths_value; + optional target_lengths_bdim; + std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level); + auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, reduction, zero_infinity); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) { + return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); + } + Tensor log_probs_value; + optional log_probs_bdim; + std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level); + Tensor targets_value; + optional targets_bdim; + std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level); + auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, zero_infinity); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) { + return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); + } + Tensor log_probs_value; + optional log_probs_bdim; + std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level); + Tensor targets_value; + optional targets_bdim; + std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level); + Tensor input_lengths_value; + optional input_lengths_bdim; + std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level); + Tensor target_lengths_value; + optional target_lengths_bdim; + std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level); + auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, zero_infinity); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _ctc_loss_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) { + return at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); + } + Tensor grad_value; + 
optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor log_probs_value; + optional log_probs_bdim; + std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level); + Tensor targets_value; + optional targets_bdim; + std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level); + Tensor neg_log_likelihood_value; + optional neg_log_likelihood_bdim; + std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level); + Tensor log_alpha_value; + optional log_alpha_bdim; + std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level); + auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _ctc_loss_backward_Tensor_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) { + return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor log_probs_value; + optional log_probs_bdim; + std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level); + Tensor targets_value; + optional targets_bdim; + std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level); + Tensor input_lengths_value; + optional input_lengths_bdim; + std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level); + Tensor target_lengths_value; + optional target_lengths_bdim; + std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level); + Tensor neg_log_likelihood_value; + optional neg_log_likelihood_bdim; + std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level); + Tensor log_alpha_value; + optional log_alpha_bdim; + std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level); + auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor diag_embed_generated_plumbing(const 
at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::diag_embed::call(self, offset, dim1, dim2);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor diagflat_generated_plumbing(const at::Tensor & self, int64_t offset) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::diagflat::call(self, offset);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, offset);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor diagonal_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::diagonal::call(self, offset, dim1, dim2);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_diagonal_generated_plumbing(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2);
+  }
+  Tensor A_value;
+  optional<int64_t> A_bdim;
+  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, offset, dim1, dim2);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor diagonal_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, outdim, dim1, dim2, offset);
+  return makeBatched(std::get<0>(results), std::get<1>(results),
cur_level); +} +template +at::Tensor diagonal_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & fill_diagonal__generated_plumbing(at::Tensor & self, const at::Scalar & fill_value, bool wrap) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fill_diagonal_::call(self, fill_value, wrap); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, fill_value, wrap); + return self; +} +template +at::Tensor diff_generated_plumbing(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(prepend, cur_level) && !isBatchedAtLevel(append, cur_level)) { + return at::_ops::diff::call(self, n, dim, prepend, append); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional prepend_value; + optional prepend_bdim; + if (prepend) { + std::tie(prepend_value, prepend_bdim) = unwrapTensorAtLevel(prepend.value(), cur_level); + } + optional append_value; + optional append_bdim; + if (append) { + std::tie(append_value, append_bdim) = unwrapTensorAtLevel(append.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, n, dim, prepend_value, prepend_bdim, append_value, append_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector gradient_scalarint_generated_plumbing(const at::Tensor & self, const c10::optional & spacing, c10::optional dim, int64_t edge_order) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
+::std::vector gradient_scalararray_generated_plumbing(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector gradient_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gradient_array::call(self, dim, edge_order); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, edge_order); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector gradient_scalarrayint_generated_plumbing(const at::Tensor & self, at::ArrayRef spacing, c10::optional dim, int64_t edge_order) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector gradient_scalarrayarray_generated_plumbing(const at::Tensor & self, at::ArrayRef spacing, at::IntArrayRef dim, int64_t edge_order) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector gradient_tensorarrayint_generated_plumbing(const at::Tensor & self, at::TensorList spacing, c10::optional dim, int64_t edge_order) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && 
!isBatchedAtLevel(spacing, cur_level)) { + return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector gradient_tensorarray_generated_plumbing(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) { + return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor div_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::div_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & div__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::div__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor div_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::div_Tensor_mode::call(self, other, rounding_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor 
other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & div__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::div__Tensor_mode::call(self, other, rounding_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode); + return self; +} +template +at::Tensor div_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::div_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & div__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::div__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor div_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::div_Scalar_mode::call(self, other, rounding_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, rounding_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & div__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::div__Scalar_mode::call(self, other, rounding_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other, rounding_mode); + return self; +} +template +at::Tensor divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::divide_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::divide__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::divide_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::divide__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor divide_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::divide_Tensor_mode::call(self, other, rounding_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & divide__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::divide__Tensor_mode::call(self, other, rounding_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode); + return self; +} +template +at::Tensor divide_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::divide_Scalar_mode::call(self, other, rounding_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, rounding_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & divide__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::divide__Scalar_mode::call(self, other, rounding_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other, rounding_mode); + return self; +} +template +at::Tensor true_divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::true_divide_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + 
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & true_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::true_divide__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor true_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::true_divide_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & true_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::true_divide__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor dot_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor, cur_level)) { + return at::_ops::dot::call(self, tensor); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor tensor_value; + optional tensor_bdim; + std::tie(tensor_value, tensor_bdim) = unwrapTensorAtLevel(tensor, cur_level); + auto results = batch_rule(self_value, self_bdim, tensor_value, tensor_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor vdot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::vdot::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor einsum_generated_plumbing(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::einsum::call(equation, tensors, path); + } + + auto results = batch_rule(equation, tensors, path); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor embedding_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse); + } + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, padding_idx, scale_grad_by_freq, sparse); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor embedding_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq, sparse); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor embedding_dense_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, 
c10::SymInt padding_idx, bool scale_grad_by_freq) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & embedding_renorm__generated_plumbing(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type); + return self; +} +template +at::Tensor embedding_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _embedding_bag_forward_only_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight, cur_level) && 
!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) { + return at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); + } + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + optional per_sample_weights_value; + optional per_sample_weights_bdim; + if (per_sample_weights) { + std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level); + } + auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple _rowwise_prune_generated_plumbing(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_rowwise_prune::call(weight, mask, compressed_indices_dtype); + } + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(weight_value, weight_bdim, mask_value, mask_bdim, compressed_indices_dtype); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor row_stack_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::row_stack::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) { + return at::_ops::embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset); + } + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + optional per_sample_weights_value; + optional per_sample_weights_bdim; + if (per_sample_weights) { + std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level); + } + auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple embedding_bag_padding_idx_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, c10::optional padding_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) { + return at::_ops::embedding_bag_padding_idx::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); + } + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + optional per_sample_weights_value; + optional per_sample_weights_bdim; + if (per_sample_weights) { + std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level); + } + auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple 
_embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) { + return at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); + } + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + optional per_sample_weights_value; + optional per_sample_weights_bdim; + if (per_sample_weights) { + std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level); + } + auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +at::Tensor _embedding_bag_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) { + return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + Tensor offset2bag_value; + optional offset2bag_bdim; + 
std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level); + Tensor bag_size_value; + optional bag_size_bdim; + std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level); + Tensor maximum_indices_value; + optional maximum_indices_bdim; + std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level); + optional per_sample_weights_value; + optional per_sample_weights_bdim; + if (per_sample_weights) { + std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level); + } + auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, padding_idx); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _embedding_bag_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) { + return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + Tensor offset2bag_value; + optional offset2bag_bdim; + std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level); + Tensor bag_size_value; + optional bag_size_bdim; + std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level); + optional per_sample_weights_value; + optional per_sample_weights_bdim; + if (per_sample_weights) { + std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level); + } + auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _embedding_bag_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool 
scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) { + return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor offset2bag_value; + optional offset2bag_bdim; + std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level); + Tensor bag_size_value; + optional bag_size_bdim; + std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level); + Tensor maximum_indices_value; + optional maximum_indices_bdim; + std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level); + optional per_sample_weights_value; + optional per_sample_weights_bdim; + if (per_sample_weights) { + std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level); + } + auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _embedding_bag_per_sample_weights_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level)) { + return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + Tensor offset2bag_value; + optional offset2bag_bdim; + std::tie(offset2bag_value, offset2bag_bdim) = 
unwrapTensorAtLevel(offset2bag, cur_level); + auto results = batch_rule(grad_value, grad_bdim, weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, mode, padding_idx); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor new_empty_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor new_empty_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::new_empty_strided::call(self, size, stride, dtype, layout, device, pin_memory); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor new_full_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::new_full::call(self, size, fill_value, dtype, layout, device, pin_memory); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, fill_value, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor new_zeros_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory); + } + Tensor self_value; + optional self_bdim; + 
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor new_ones_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::new_ones::call(self, size, dtype, layout, device, pin_memory); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _empty_per_channel_affine_quantized_generated_plumbing(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) { + return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format); + } + Tensor scales_value; + optional scales_bdim; + std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level); + Tensor zero_points_value; + optional zero_points_bdim; + std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level); + auto results = batch_rule(size, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +const at::Tensor & _resize_output__generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_resize_output_::call(self, size, device); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, size, device); + return self; +} +template +at::Tensor empty_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(qtensor, cur_level)) { + return 
at::_ops::empty_quantized::call(size, qtensor, dtype, layout, device, pin_memory, memory_format); + } + Tensor qtensor_value; + optional qtensor_bdim; + std::tie(qtensor_value, qtensor_bdim) = unwrapTensorAtLevel(qtensor, cur_level); + auto results = batch_rule(size, qtensor_value, qtensor_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor empty_like_generated_plumbing(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor erf_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erf::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & erf__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erf_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor erfc_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erfc::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & erfc__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erfc_::call(self); + } + Tensor self_value; + optional self_bdim; + 
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor exp_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exp::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & exp__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exp_::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor exp2_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exp2::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & exp2__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exp2_::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor expm1_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::expm1::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & expm1__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::expm1_::call(self); + } + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor expand_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::expand::call(self, size, implicit); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, implicit); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor expand_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::expand_as::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flatten_using_ints_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flatten_using_ints::call(self, start_dim, end_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, start_dim, end_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flatten_named_out_dim_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flatten_named_out_dim::call(self, start_dim, end_dim, out_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flatten_using_names_generated_plumbing(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flatten_using_names::call(self, start_dim, end_dim, out_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flatten_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flatten_DimnameList::call(self, dims, out_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims, out_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor unflatten_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unflatten_int::call(self, dim, sizes); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, sizes); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor unflatten_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unflatten_Dimname::call(self, dim, sizes, names); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, sizes, names); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fill_Scalar::call(self, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::fill_Tensor::call(self, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + auto results = batch_rule(self_value, self_bdim, value_value, value_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & fill__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fill__Scalar::call(self, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, value); + return self; +} +template +at::Tensor & fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::fill__Tensor::call(self, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + batch_rule(self_value, self_bdim, value_value, value_bdim); + return self; +} +template +at::Tensor floor_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::floor::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & floor__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::floor_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor floor_divide_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::floor_divide::call(self, other); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional<int64_t> other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & floor_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::floor_divide__Tensor::call(self, other); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional<int64_t> other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor floor_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::floor_divide_Scalar::call(self, other); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & floor_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::floor_divide__Scalar::call(self, other); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor frac_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::frac::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & frac__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::frac_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor full_like_generated_plumbing(const at::Tensor & self, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, fill_value, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor gcd_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::gcd::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & gcd__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::gcd_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor lcm_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::lcm::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & lcm__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::lcm_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor grid_sampler_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor grid_sampler_2d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple grid_sampler_2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _grid_sampler_2d_cpu_fallback_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _grid_sampler_2d_cpu_fallback_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), 
makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor grid_sampler_3d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple grid_sampler_3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grid_value; + optional grid_bdim; + std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor hinge_embedding_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, margin, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); 
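+  // The functions in this header all follow the same generated pattern:
+  //   (1) exclude the FuncTorchBatched key so the batch rule is not re-dispatched,
+  //   (2) if no tensor argument is batched at the current vmap level, fall back to
+  //       the plain at::_ops::<op>::call,
+  //   (3) otherwise unwrap each argument into a (value, optional batch-dim) pair,
+  //       invoke `batch_rule`, and re-wrap the result via makeBatched.
+  // A minimal sketch of a batch rule such plumbing could be instantiated with
+  // (hypothetical rule for a unary op; not part of the generated header):
+  //
+  //   std::tuple<at::Tensor, c10::optional<int64_t>>
+  //   my_unary_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim) {
+  //     // operate on the unwrapped tensor; the batch dimension is carried through
+  //     return std::make_tuple(self.frac(), self_bdim);
+  //   }
+  //
+  // and the corresponding *_generated_plumbing template would be instantiated with
+  // a pointer to such a rule as its `batch_rule` argument.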
+} +template +at::Tensor group_norm_generated_plumbing(const at::Tensor & input, int64_t num_groups, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enabled) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, num_groups, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enabled); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple native_group_norm_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, N, C, HxW, group, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple native_group_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, 
output_mask); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor rstd_value; + optional rstd_bdim; + std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, N, C, HxW, group, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _fft_r2c_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fft_r2c::call(self, dim, normalization, onesided); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, normalization, onesided); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _fft_c2r_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fft_c2r::call(self, dim, normalization, last_dim_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, normalization, last_dim_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _fft_c2c_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fft_c2c::call(self, dim, normalization, forward); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, normalization, forward); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _validate_compressed_sparse_indices_generated_plumbing(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t 
cdim, int64_t dim, int64_t nnz) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(compressed_idx, cur_level) && !isBatchedAtLevel(plain_idx, cur_level)) { + return at::_ops::_validate_compressed_sparse_indices::call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz); + } + Tensor compressed_idx_value; + optional compressed_idx_bdim; + std::tie(compressed_idx_value, compressed_idx_bdim) = unwrapTensorAtLevel(compressed_idx, cur_level); + Tensor plain_idx_value; + optional plain_idx_bdim; + std::tie(plain_idx_value, plain_idx_bdim) = unwrapTensorAtLevel(plain_idx, cur_level); + batch_rule(is_crow, compressed_idx_value, compressed_idx_bdim, plain_idx_value, plain_idx_bdim, cdim, dim, nnz); +} +template +at::Tensor index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List> & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::index_Tensor::call(self, indices); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _unsafe_index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List> & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::_unsafe_index_Tensor::call(self, indices); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & index_copy__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_copy_::call(self, dim, index, source); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim); + return self; +} +template +at::Tensor index_copy_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & 
index, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_copy::call(self, dim, index, source); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & index_copy__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_copy__dimname::call(self, dim, index, source); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim); + return self; +} +template +at::Tensor index_copy_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_copy_dimname::call(self, dim, index, source); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & index_put__generated_plumbing(at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t 
cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::index_put_::call(self, indices, values, accumulate); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate); + return self; +} +template +at::Tensor index_put_generated_plumbing(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::index_put::call(self, indices, values, accumulate); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _unsafe_index_put_generated_plumbing(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_unsafe_index_put::call(self, indices, values, accumulate); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _index_put_impl__generated_plumbing(at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate, bool unsafe) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe); + 
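+  // Sketch (not generated code): the "gen_vmap_inplace_plumbing" variants call the
+  // batch rule purely for its side effect on self_value and then return `self`,
+  // rather than re-wrapping a result with makeBatched. A rule matching this call
+  // site would have roughly the shape (hypothetical signature):
+  //
+  //   void my_index_put_impl_batch_rule(
+  //       at::Tensor& self, c10::optional<int64_t> self_bdim,
+  //       const c10::List<c10::optional<at::Tensor>>& indices,
+  //       const at::Tensor& values, c10::optional<int64_t> values_bdim,
+  //       bool accumulate, bool unsafe);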
return self; +} +template +at::Tensor instance_norm_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, use_input_stats, momentum, eps, cudnn_enabled); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor isclose_generated_plumbing(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::isclose::call(self, other, rtol, atol, equal_nan); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rtol, atol, equal_nan); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor isin_Tensor_Tensor_generated_plumbing(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(elements, cur_level) && !isBatchedAtLevel(test_elements, cur_level)) { + return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert); + } + Tensor elements_value; + optional 
elements_bdim; + std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level); + Tensor test_elements_value; + optional test_elements_bdim; + std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level); + auto results = batch_rule(elements_value, elements_bdim, test_elements_value, test_elements_bdim, assume_unique, invert); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor isin_Tensor_Scalar_generated_plumbing(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(elements, cur_level)) { + return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert); + } + Tensor elements_value; + optional elements_bdim; + std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level); + auto results = batch_rule(elements_value, elements_bdim, test_element, assume_unique, invert); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor isin_Scalar_Tensor_generated_plumbing(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(test_elements, cur_level)) { + return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert); + } + Tensor test_elements_value; + optional test_elements_bdim; + std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level); + auto results = batch_rule(element, test_elements_value, test_elements_bdim, assume_unique, invert); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor isnan_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::isnan::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor isreal_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::isreal::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor kl_div_generated_plumbing(const at::Tensor & self, const at::Tensor & 
target, int64_t reduction, bool log_target) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::kl_div::call(self, target, reduction, log_target); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, log_target); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor kron_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::kron::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple kthvalue_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::kthvalue::call(self, k, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, k, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple kthvalue_dimname_generated_plumbing(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, k, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enable) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enable); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple native_layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple native_layer_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + 
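+  // Sketch (not generated code): optional tensor arguments such as weight and bias
+  // are unwrapped only when they hold a value, so the batch rule receives
+  // (c10::optional<Tensor> value, c10::optional<int64_t> bdim) pairs for them.
+  // Multi-output ops return a flat tuple of (value, bdim) pairs which the plumbing
+  // re-wraps pairwise; a rule for this op would return roughly
+  //
+  //   std::tuple<at::Tensor, c10::optional<int64_t>,   // grad_input
+  //              at::Tensor, c10::optional<int64_t>,   // grad_weight
+  //              at::Tensor, c10::optional<int64_t>>   // grad_bias
+  //
+  // matching the std::get<0..5> re-wrapping done below.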
Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor rstd_value; + optional rstd_bdim; + std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, normalized_shape, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, bias_value, bias_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor nan_to_num_generated_plumbing(const at::Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nan_to_num::call(self, nan, posinf, neginf); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, nan, posinf, neginf); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & nan_to_num__generated_plumbing(at::Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nan_to_num_::call(self, nan, posinf, neginf); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, nan, posinf, neginf); + return self; +} +template +at::Tensor linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::linear::call(input, weight, bias); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, 
bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::linear_backward::call(self, grad_output, weight, output_mask); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor mkldnn_linear_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::mkldnn_linear::call(self, weight, bias); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_linear_backward_input_generated_plumbing(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(input_size, grad_output_value, grad_output_bdim, weight_value, weight_bdim); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple mkldnn_linear_backward_weights_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_defined); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple mkldnn_linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mkldnn_linear_backward::call(self, grad_output, weight, output_mask); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _cslt_compress_generated_plumbing(const at::Tensor & input) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::_cslt_compress::call(input); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cslt_sparse_mm_generated_plumbing(const at::Tensor & compressed_A, const 
at::Tensor & dense_B, const c10::optional & bias, const c10::optional & alpha, c10::optional out_dtype, bool transpose_result, int64_t alg_id) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(compressed_A, cur_level) && !isBatchedAtLevel(dense_B, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(alpha, cur_level)) { + return at::_ops::_cslt_sparse_mm::call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id); + } + Tensor compressed_A_value; + optional compressed_A_bdim; + std::tie(compressed_A_value, compressed_A_bdim) = unwrapTensorAtLevel(compressed_A, cur_level); + Tensor dense_B_value; + optional dense_B_bdim; + std::tie(dense_B_value, dense_B_bdim) = unwrapTensorAtLevel(dense_B, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + optional alpha_value; + optional alpha_bdim; + if (alpha) { + std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha.value(), cur_level); + } + auto results = batch_rule(compressed_A_value, compressed_A_bdim, dense_B_value, dense_B_bdim, bias_value, bias_bdim, alpha_value, alpha_bdim, out_dtype, transpose_result, alg_id); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_semi_structured_linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const c10::optional & bias, c10::optional activation, c10::optional out_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(meta, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_sparse_semi_structured_linear::call(input, weight, meta, bias, activation, out_dtype); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor meta_value; + optional meta_bdim; + std::tie(meta_value, meta_bdim) = unwrapTensorAtLevel(meta, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, meta_value, meta_bdim, bias_value, bias_bdim, activation, out_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mixed_dtypes_linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & scale, const c10::optional & bias, c10::optional activation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(bias, 
cur_level)) { + return at::_ops::_mixed_dtypes_linear::call(input, weight, scale, bias, activation); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, scale_value, scale_bdim, bias_value, bias_bdim, activation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_linear_int8_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor packed_value; + optional packed_bdim; + std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level); + Tensor col_offsets_value; + optional col_offsets_bdim; + std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level); + Tensor bias_value; + optional bias_bdim; + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_linear_int8_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, 
input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor packed_value; + optional packed_bdim; + std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level); + Tensor col_offsets_value; + optional col_offsets_bdim; + std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level); + Tensor bias_value; + optional bias_bdim; + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_pack_gemm_matrix_fp16_generated_plumbing(const at::Tensor & input) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::fbgemm_pack_gemm_matrix_fp16::call(input); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_linear_fp16_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::call(input, packed_weight, bias); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor packed_weight_value; + optional packed_weight_bdim; + std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level); + Tensor bias_value; + optional bias_bdim; + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_linear_fp16_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::fbgemm_linear_fp16_weight::call(input, packed_weight, bias); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor packed_weight_value; + optional packed_weight_bdim; + 
std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level); + Tensor bias_value; + optional bias_bdim; + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_pack_quantized_matrix_generated_plumbing(const at::Tensor & input) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::fbgemm_pack_quantized_matrix::call(input); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_pack_quantized_matrix_KN_generated_plumbing(const at::Tensor & input, int64_t K, int64_t N) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::fbgemm_pack_quantized_matrix_KN::call(input, K, N); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, K, N); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ldexp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::ldexp_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & ldexp__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::ldexp_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor 
linspace_Tensor_Tensor_generated_plumbing(const at::Tensor & start, const at::Tensor & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(start, cur_level) && !isBatchedAtLevel(end, cur_level)) { + return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, dtype, layout, device, pin_memory); + } + Tensor start_value; + optional start_bdim; + std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level); + Tensor end_value; + optional end_bdim; + std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(start_value, start_bdim, end_value, end_bdim, steps, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linspace_Tensor_Scalar_generated_plumbing(const at::Tensor & start, const at::Scalar & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(start, cur_level)) { + return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, dtype, layout, device, pin_memory); + } + Tensor start_value; + optional start_bdim; + std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level); + auto results = batch_rule(start_value, start_bdim, end, steps, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linspace_Scalar_Tensor_generated_plumbing(const at::Scalar & start, const at::Tensor & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(end, cur_level)) { + return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, dtype, layout, device, pin_memory); + } + Tensor end_value; + optional end_bdim; + std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(start, end_value, end_bdim, steps, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor log_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & log__generated_plumbing(at::Tensor & self) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor log10_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log10::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & log10__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log10_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor log1p_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log1p::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & log1p__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log1p_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor log2_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log2::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & 
log2__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::log2_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor logaddexp_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::logaddexp::call(self, other);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  Tensor other_value;
+  optional<int64_t> other_bdim;
+  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor logaddexp2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::logaddexp2::call(self, other);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  Tensor other_value;
+  optional<int64_t> other_bdim;
+  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor xlogy_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::xlogy_Tensor::call(self, other);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  Tensor other_value;
+  optional<int64_t> other_bdim;
+  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor xlogy_Scalar_Self_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(other, cur_level)) {
+    return
at::_ops::xlogy_Scalar_Self::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor xlogy_Scalar_Other_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::xlogy_Scalar_Other::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & xlogy__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::xlogy__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor & xlogy__Scalar_Other_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::xlogy__Scalar_Other::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor logspace_Tensor_Tensor_generated_plumbing(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(start, cur_level) && !isBatchedAtLevel(end, cur_level)) { + return at::_ops::logspace_Tensor_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory); + } + Tensor start_value; + optional start_bdim; + std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level); + Tensor end_value; + optional end_bdim; + std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(start_value, start_bdim, end_value, end_bdim, steps, base, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
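// The wrappers above and below are all parameterized over a `batch_rule` supplied as a template
// argument. Judging from how they invoke it (each tensor argument is passed together with an
// optional batch dimension, and the result is handed to makeBatched), a rule for the binary
// pattern used by xlogy_Tensor_generated_plumbing would plausibly be declared as below;
// `my_xlogy_batch_rule` is an illustrative name, not something defined in this header (the
// concrete rules are supplied elsewhere in functorch):
std::tuple<at::Tensor, c10::optional<int64_t>> my_xlogy_batch_rule(
    const at::Tensor & self, c10::optional<int64_t> self_bdim,
    const at::Tensor & other, c10::optional<int64_t> other_bdim);
// The plumbing is then instantiated with that rule baked in as a non-type template argument,
// e.g. (hypothetical):
//   xlogy_Tensor_generated_plumbing<decltype(&my_xlogy_batch_rule), &my_xlogy_batch_rule>(self, other);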
+at::Tensor logspace_Tensor_Scalar_generated_plumbing(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(start, cur_level)) { + return at::_ops::logspace_Tensor_Scalar::call(start, end, steps, base, dtype, layout, device, pin_memory); + } + Tensor start_value; + optional start_bdim; + std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level); + auto results = batch_rule(start_value, start_bdim, end, steps, base, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logspace_Scalar_Tensor_generated_plumbing(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(end, cur_level)) { + return at::_ops::logspace_Scalar_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory); + } + Tensor end_value; + optional end_bdim; + std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(start, end_value, end_bdim, steps, base, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_softmax_int::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_softmax_Dimname::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_log_softmax::call(self, dim, half_to_float); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, half_to_float); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_logcumsumexp::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logcumsumexp::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logcumsumexp_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logcumsumexp_dimname::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
+at::Tensor logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logsumexp::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logsumexp_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logsumexp_names::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor margin_ranking_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction); + } + Tensor input1_value; + optional input1_bdim; + std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level); + Tensor input2_value; + optional input2_bdim; + std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::matmul::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple matmul_backward_generated_plumbing(const 
at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::matmul_backward::call(grad, self, other, mask); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, other_value, other_bdim, mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::matrix_power::call(self, n); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor matrix_exp_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::matrix_exp::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor matrix_exp_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad, cur_level)) { + return at::_ops::matrix_exp_backward::call(self, grad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_value, grad_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _aminmax_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_aminmax::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _aminmax_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_aminmax_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple aminmax_generated_plumbing(const at::Tensor & self, c10::optional dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::aminmax::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _compute_linear_combination_generated_plumbing(const at::Tensor & input, const at::Tensor & coefficients) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(coefficients, cur_level)) { + return at::_ops::_compute_linear_combination::call(input, coefficients); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor coefficients_value; + optional coefficients_bdim; + std::tie(coefficients_value, coefficients_bdim) = unwrapTensorAtLevel(coefficients, cur_level); + auto results = batch_rule(input_value, input_bdim, coefficients_value, coefficients_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple max_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; 
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple max_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_names_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor value_selecting_reduction_backward_generated_plumbing(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_value, grad_bdim, dim, indices_value, indices_bdim, sizes, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor amax_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::amax::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple max_pool1d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = 
batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level 
= maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) { + return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) { + return at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor 
grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantized_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mean_generated_plumbing(const at::Tensor & self, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mean::call(self, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mean_dim::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nanmean_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanmean::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
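// Reductions that return a (values, indices) pair, such as max_dim and aminmax above or median_dim
// and min_dim below, use a two-output variant of the same plumbing: the batch rule yields both
// outputs interleaved with their batch dimensions, and each is re-wrapped separately. A minimal
// sketch for a hypothetical `my_reduction.dim` operator, with the stripped template header and
// `optional<int64_t>` reconstructed (illustrative only, not part of the generated header):
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor, at::Tensor> my_reduction_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::my_reduction_dim::call(self, dim, keepdim);  // hypothetical op entry
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  // results = (values, values_bdim, indices, indices_bdim)
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(
      makeBatched(std::get<0>(results), std::get<1>(results), cur_level),
      makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}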
+at::Tensor median_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::median::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple median_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::median_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple median_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::median_names_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor nanmedian_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanmedian::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple nanmedian_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanmedian_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), 
std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple nanmedian_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanmedian_names_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple min_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::min_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple min_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::min_names_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor amin_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::amin::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mps_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple mps_convolution_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor mkldnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, 
bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple mkldnn_rnn_layer_generated_plumbing(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight0, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_, cur_level)) { + return at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight0_value; + optional weight0_bdim; + std::tie(weight0_value, weight0_bdim) = unwrapTensorAtLevel(weight0, cur_level); + Tensor weight1_value; + optional weight1_bdim; + std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level); + Tensor weight2_value; + optional weight2_bdim; + std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level); + Tensor weight3_value; + optional weight3_bdim; + std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level); + Tensor hx__value; + optional hx__bdim; + std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level); + Tensor cx__value; + optional cx__bdim; + std::tie(cx__value, cx__bdim) = unwrapTensorAtLevel(cx_, cur_level); + auto results = batch_rule(input_value, input_bdim, weight0_value, weight0_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, hx__value, hx__bdim, cx__value, cx__bdim, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple mkldnn_rnn_layer_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(weight4, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_tmp, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(hy_, cur_level) && !isBatchedAtLevel(cy_, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) { + return at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight1_value; + optional weight1_bdim; + std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level); + Tensor weight2_value; + optional weight2_bdim; + std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level); + Tensor weight3_value; + optional weight3_bdim; + std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level); + Tensor weight4_value; + optional weight4_bdim; + std::tie(weight4_value, weight4_bdim) = unwrapTensorAtLevel(weight4, cur_level); + Tensor hx__value; + optional hx__bdim; + std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level); + Tensor cx_tmp_value; + optional cx_tmp_bdim; + std::tie(cx_tmp_value, cx_tmp_bdim) = unwrapTensorAtLevel(cx_tmp, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor hy__value; + optional hy__bdim; + std::tie(hy__value, hy__bdim) = unwrapTensorAtLevel(hy_, cur_level); + Tensor cy__value; + optional cy__bdim; + std::tie(cy__value, cy__bdim) = unwrapTensorAtLevel(cy_, cur_level); + Tensor workspace_value; + optional workspace_bdim; + std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level); + optional grad_output_value; + optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, weight4_value, weight4_bdim, hx__value, hx__bdim, cx_tmp_value, cx_tmp_bdim, output_value, output_bdim, hy__value, hy__bdim, cy__value, cy__bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_value, workspace_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), 
makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level)); +} +template +::std::tuple miopen_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple miopen_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level)) { + return at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon); + } + Tensor input_value; + optional input_bdim; + 
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + optional save_mean_value; + optional save_mean_bdim; + if (save_mean) { + std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level); + } + optional save_var_value; + optional save_var_bdim; + if (save_var) { + std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor miopen_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor miopen_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor miopen_depthwise_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor miopen_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, 
dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor miopen_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor z_value; + optional z_bdim; + std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple miopen_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) { + return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + optional cx_value; + optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + optional dropout_state_value; + optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), 
makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple> miopen_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) { + return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_buf_value; + optional weight_buf_bdim; + std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor reserve_value; + optional reserve_bdim; + std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level); + optional cx_value; + optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + optional grad_output_value; + optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + optional dropout_state_value; + optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, 
grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+ return at::_ops::mm::call(self, mat2);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor mat2_value;
+ optional<int64_t> mat2_bdim;
+ std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
+ auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _int_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+ return at::_ops::_int_mm::call(self, mat2);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor mat2_value;
+ optional<int64_t> mat2_bdim;
+ std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
+ auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _convert_weight_to_int4pack_generated_plumbing(const at::Tensor & self, int64_t innerKTiles) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::_convert_weight_to_int4pack::call(self, innerKTiles);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim, innerKTiles);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _weight_int4pack_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(qScaleAndZeros, cur_level)) {
+ return at::_ops::_weight_int4pack_mm::call(self, mat2, qGroupSize, qScaleAndZeros);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor mat2_value;
+ optional<int64_t> mat2_bdim;
+ std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
+ Tensor qScaleAndZeros_value;
+ optional<int64_t> qScaleAndZeros_bdim;
+ std::tie(qScaleAndZeros_value, qScaleAndZeros_bdim) = unwrapTensorAtLevel(qScaleAndZeros, cur_level);
+ auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, qGroupSize, qScaleAndZeros_value, qScaleAndZeros_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _weight_int8pack_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(scales, cur_level)) {
+ return at::_ops::_weight_int8pack_mm::call(self, mat2, scales);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor mat2_value;
+ optional<int64_t> mat2_bdim;
+ std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
+ Tensor scales_value;
+ optional<int64_t> scales_bdim;
+ std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
+ auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, scales_value, scales_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_mm_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
+ return at::_ops::_sparse_mm::call(sparse, dense);
+ }
+ Tensor sparse_value;
+ optional<int64_t> sparse_bdim;
+ std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
+ Tensor dense_value;
+ optional<int64_t> dense_bdim;
+ std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
+ auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_mm_reduce_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
+ return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce);
+ }
+ Tensor sparse_value;
+ optional<int64_t> sparse_bdim;
+ std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
+ Tensor dense_value;
+ optional<int64_t> dense_bdim;
+ std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
+ auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim, reduce);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_sparse_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+ return at::_ops::_sparse_sparse_matmul::call(self, other);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor other_value;
+ optional<int64_t> other_bdim;
+ std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
+ auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> mode_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::mode::call(self, dim, keepdim);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> mode_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::mode_dimname::call(self, dim, keepdim);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mul_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+ return at::_ops::mul_Tensor::call(self, other);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor other_value;
+ optional<int64_t> other_bdim;
+ std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
+ auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & mul__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+ return at::_ops::mul__Tensor::call(self, other);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor other_value;
+ optional<int64_t> other_bdim;
+ std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
+ batch_rule(self_value, self_bdim, other_value, other_bdim);
+ return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mul_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::mul_Scalar::call(self, other);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim, other);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & mul__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::mul__Scalar::call(self, other);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ batch_rule(self_value, self_bdim, other);
+ return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor multiply_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+ return at::_ops::multiply_Tensor::call(self, other);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor other_value;
+ optional<int64_t> other_bdim;
+ std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
+ auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & multiply__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+ return at::_ops::multiply__Tensor::call(self, other);
+ }
+ Tensor self_value;
+ optional<int64_t> self_bdim;
+ std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+ Tensor other_value;
+ optional<int64_t> other_bdim;
+ std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
+ batch_rule(self_value, self_bdim, other_value, other_bdim);
+ return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor multiply_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::multiply_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & multiply__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::multiply__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor mv_generated_plumbing(const at::Tensor & self, const at::Tensor & vec) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec, cur_level)) { + return at::_ops::mv::call(self, vec); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor vec_value; + optional vec_bdim; + std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level); + auto results = batch_rule(self_value, self_bdim, vec_value, vec_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mvlgamma_generated_plumbing(const at::Tensor & self, int64_t p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mvlgamma::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & mvlgamma__generated_plumbing(at::Tensor & self, int64_t p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mvlgamma_::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p); + return self; +} +template +at::Tensor narrow_copy_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::narrow_copy::call(self, dim, start, length); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, start, length); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor narrow_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::narrow::call(self, dim, start, length); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, start, length); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor narrow_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(start, cur_level)) { + return at::_ops::narrow_Tensor::call(self, dim, start, length); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor start_value; + optional start_bdim; + std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, start_value, start_bdim, length); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple native_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + optional running_mean_value; + optional running_mean_bdim; + if 
(running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _native_batch_norm_legit_no_training_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::_native_batch_norm_legit_no_training::call(input, weight, bias, running_mean, running_var, momentum, eps); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor running_mean_value; + optional running_mean_bdim; + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean, cur_level); + Tensor running_var_value; + optional running_var_bdim; + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _native_batch_norm_legit_no_stats_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, bool training, double momentum, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional 
weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, training, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple batch_norm_stats_generated_plumbing(const at::Tensor & input, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::batch_norm_stats::call(input, eps); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor batch_norm_elemt_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level)) { + return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor invstd_value; + optional invstd_bdim; + std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple batch_norm_gather_stats_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor invstd_value; + optional invstd_bdim; + std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level); + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, count); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple batch_norm_gather_stats_with_counts_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const at::Tensor & counts) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(counts, cur_level)) { + return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor invstd_value; + optional invstd_bdim; + std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level); + Tensor counts_value; + optional counts_bdim; + std::tie(counts_value, counts_bdim) = unwrapTensorAtLevel(counts, cur_level); + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, counts_value, counts_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), 
makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple native_batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_invstd, cur_level)) { + return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + optional save_mean_value; + optional save_mean_bdim; + if (save_mean) { + std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level); + } + optional save_invstd_value; + optional save_invstd_bdim; + if (save_invstd) { + std::tie(save_invstd_value, save_invstd_bdim) = unwrapTensorAtLevel(save_invstd.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_invstd_value, save_invstd_bdim, train, eps, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple batch_norm_backward_reduce_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, 
input_g, weight_g, bias_g); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor invstd_value; + optional invstd_bdim; + std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, input_g, weight_g, bias_g); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +at::Tensor batch_norm_backward_elemt_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(sum_dy, cur_level) && !isBatchedAtLevel(sum_dy_xmu, cur_level) && !isBatchedAtLevel(count, cur_level)) { + return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor invstd_value; + optional invstd_bdim; + std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level); + Tensor sum_dy_value; + optional sum_dy_bdim; + std::tie(sum_dy_value, sum_dy_bdim) = unwrapTensorAtLevel(sum_dy, cur_level); + Tensor sum_dy_xmu_value; + optional sum_dy_xmu_bdim; + std::tie(sum_dy_xmu_value, sum_dy_xmu_bdim) = unwrapTensorAtLevel(sum_dy_xmu, cur_level); + Tensor count_value; + optional count_bdim; + std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, sum_dy_value, sum_dy_bdim, sum_dy_xmu_value, sum_dy_xmu_bdim, count_value, count_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
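// The ops in this stretch take c10::optional tensors (weight, bias,
// running_mean, running_var, ...). Their plumbing unwraps each optional
// tensor only when it is present (see the if (running_mean) / if (running_var)
// blocks below) and forwards both an optional value and an optional batch
// dimension to the batch rule, so the rule can tell "argument absent" apart
// from "argument present but not batched at this level".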
+::std::tuple batch_norm_update_stats_generated_plumbing(const at::Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional running_mean_value; + optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + optional running_var_value; + optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _nnpack_spatial_convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ones_like_generated_plumbing(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ones_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} 
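// A minimal sketch of the recurring plumbing pattern in this header, written
// out for a hypothetical unary op "my_op" (neither my_op nor my_op_batch_rule
// exists in at::_ops; they are illustrative placeholders, which is why the
// sketch is guarded out with #if 0). It assumes the usual functorch plumbing
// template head, parameterized over the batch rule being plumbed.
#if 0
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor my_op_generated_plumbing(const at::Tensor & self) {
  // Keep FuncTorchBatched excluded so tensor ops invoked below do not
  // re-enter this batched wrapper for the same level.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at the current vmap level; defer to the op.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::my_op::call(self);
  }
  // Unwrap the batched tensor into (plain value, optional batch dim), run the
  // batch rule on the pair, then re-wrap its (value, bdim) result at the same
  // level.
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// An instantiation would then be registered for the FuncTorchBatched key,
// roughly (again with a hypothetical batch rule):
//   m.impl("my_op",
//          my_op_generated_plumbing<decltype(&my_op_batch_rule), &my_op_batch_rule>);
#endif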
+template +at::Tensor pairwise_distance_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim); + } + Tensor x1_value; + optional x1_bdim; + std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level); + Tensor x2_value; + optional x2_bdim; + std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, eps, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cdist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional compute_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::cdist::call(x1, x2, p, compute_mode); + } + Tensor x1_value; + optional x1_bdim; + std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level); + Tensor x2_value; + optional x2_bdim; + std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _euclidean_dist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::_euclidean_dist::call(x1, x2); + } + Tensor x1_value; + optional x1_bdim; + std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level); + Tensor x2_value; + optional x2_bdim; + std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cdist_forward_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional compute_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode); + } + Tensor x1_value; + optional x1_bdim; + std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level); + Tensor x2_value; + optional x2_bdim; + std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cdist_backward_generated_plumbing(const 
at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level) && !isBatchedAtLevel(cdist, cur_level)) { + return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor x1_value; + optional x1_bdim; + std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level); + Tensor x2_value; + optional x2_bdim; + std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level); + Tensor cdist_value; + optional cdist_bdim; + std::tie(cdist_value, cdist_bdim) = unwrapTensorAtLevel(cdist, cur_level); + auto results = batch_rule(grad_value, grad_bdim, x1_value, x1_bdim, x2_value, x2_bdim, p, cdist_value, cdist_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pdist_generated_plumbing(const at::Tensor & self, double p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pdist::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _pdist_forward_generated_plumbing(const at::Tensor & self, double p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_pdist_forward::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _pdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(pdist, cur_level)) { + return at::_ops::_pdist_backward::call(grad, self, p, pdist); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor pdist_value; + optional pdist_bdim; + std::tie(pdist_value, pdist_bdim) = unwrapTensorAtLevel(pdist, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, p, pdist_value, pdist_bdim); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cosine_similarity_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::cosine_similarity::call(x1, x2, dim, eps); + } + Tensor x1_value; + optional x1_bdim; + std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level); + Tensor x2_value; + optional x2_bdim; + std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, dim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor permute_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::permute::call(self, dims); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor movedim_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::movedim_intlist::call(self, source, destination); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, destination); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor movedim_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::movedim_int::call(self, source, destination); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, destination); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor moveaxis_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::moveaxis_intlist::call(self, 
source, destination); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, destination); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor moveaxis_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::moveaxis_int::call(self, source, destination); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, destination); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor numpy_T_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::numpy_T::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor matrix_H_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::matrix_H::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mT_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mT::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mH_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mH::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), 
cur_level); +} +template +at::Tensor adjoint_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adjoint::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pixel_shuffle_generated_plumbing(const at::Tensor & self, int64_t upscale_factor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pixel_shuffle::call(self, upscale_factor); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, upscale_factor); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pixel_unshuffle_generated_plumbing(const at::Tensor & self, int64_t downscale_factor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pixel_unshuffle::call(self, downscale_factor); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, downscale_factor); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor channel_shuffle_generated_plumbing(const at::Tensor & self, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::channel_shuffle::call(self, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor native_channel_shuffle_generated_plumbing(const at::Tensor & self, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::native_channel_shuffle::call(self, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pin_memory_generated_plumbing(const at::Tensor & self, 
c10::optional device) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pin_memory::call(self, device); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, device); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _pin_memory_generated_plumbing(const at::Tensor & self, c10::optional device) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_pin_memory::call(self, device); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, device); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pinverse_generated_plumbing(const at::Tensor & self, double rcond) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pinverse::call(self, rcond); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, rcond); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor poisson_nll_loss_generated_plumbing(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(input_value, input_bdim, target_value, target_bdim, log_input, full, eps, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rad2deg_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rad2deg::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & rad2deg__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rad2deg_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor deg2rad_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::deg2rad::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & deg2rad__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::deg2rad_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor rand_like_generated_plumbing(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor randint_like_generated_plumbing(const at::Tensor & self, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, high, dtype, layout, device, pin_memory, memory_format); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor randint_like_low_dtype_generated_plumbing(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, low, high, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor randn_like_generated_plumbing(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ravel_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ravel::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reciprocal_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::reciprocal::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & reciprocal__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::reciprocal_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor neg_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::neg::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & neg__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::neg_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor negative_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::negative::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & negative__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::negative_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor repeat_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef repeats) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::repeat::call(self, repeats); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, repeats); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor repeat_interleave_Tensor_generated_plumbing(const at::Tensor & repeats, c10::optional output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
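  // maybeCurrentDynamicLayer() looks up the innermost active functorch
  // interpreter, and vmap_check_escaped() below raises functorch's
  // escaped-tensor error if no layer is active for this batched tensor to
  // belong to.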
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(repeats, cur_level)) { + return at::_ops::repeat_interleave_Tensor::call(repeats, output_size); + } + Tensor repeats_value; + optional repeats_bdim; + std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level); + auto results = batch_rule(repeats_value, repeats_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor repeat_interleave_self_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & repeats, c10::optional dim, c10::optional output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(repeats, cur_level)) { + return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor repeats_value; + optional repeats_bdim; + std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level); + auto results = batch_rule(self_value, self_bdim, repeats_value, repeats_bdim, dim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor repeat_interleave_self_int_generated_plumbing(const at::Tensor & self, c10::SymInt repeats, c10::optional dim, c10::optional output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, repeats, dim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reshape_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shape) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::reshape::call(self, shape); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, shape); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _reshape_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_reshape_copy::call(self, size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results 
= batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _reshape_alias_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_reshape_alias::call(self, size, stride); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mkldnn_reshape_generated_plumbing(const at::Tensor & self, at::IntArrayRef shape) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_mkldnn_reshape::call(self, shape); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, shape); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reshape_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::reshape_as::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor round_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::round::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & round__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::round_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, 
cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor round_decimals_generated_plumbing(const at::Tensor & self, int64_t decimals) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::round_decimals::call(self, decimals); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, decimals); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & round__decimals_generated_plumbing(at::Tensor & self, int64_t decimals) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::round__decimals::call(self, decimals); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, decimals); + return self; +} +template +at::Tensor rrelu_generated_plumbing(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rrelu::call(self, lower, upper, training, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, lower, upper, training, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & rrelu__generated_plumbing(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rrelu_::call(self, lower, upper, training, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, lower, upper, training, generator); + return self; +} +template +at::Tensor relu_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::relu::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
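// The trailing-underscore in-place variants (relu_ below, and rad2deg_, neg_,
// negative_ above) follow the "gen_vmap_inplace_plumbing" shape: the wrapper
// hands the unwrapped (value, bdim) pair to the batch rule, which is expected
// to mutate the unwrapped value in place, and then simply returns self instead
// of re-wrapping a result with makeBatched.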
+at::Tensor & relu__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::relu_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor relu6_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::relu6::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & relu6__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::relu6_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor prelu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::prelu::call(self, weight); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _prelu_kernel_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_prelu_kernel::call(self, weight); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple 
_prelu_kernel_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor & gelu__generated_plumbing(at::Tensor & self, c10::string_view approximate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gelu_::call(self, approximate); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, approximate); + return self; +} +template +at::Tensor gelu_generated_plumbing(const at::Tensor & self, c10::string_view approximate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gelu::call(self, approximate); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, approximate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor gelu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::gelu_backward::call(grad_output, self, approximate); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, approximate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
infinitely_differentiable_gelu_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardshrink::call(self, lambd); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, lambd); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardshrink_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardshrink_backward::call(grad_out, self, lambd); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_out_value, grad_out_bdim, self_value, self_bdim, lambd); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rsqrt_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rsqrt::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & rsqrt__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rsqrt_::call(self); + } 
+ Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor select_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, int64_t index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::select_Dimname::call(self, dim, index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor select_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::select_int::call(self, dim, index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor select_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::select_backward::call(grad_output, input_sizes, dim, index); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_select_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_select_backward::call(grad_output, self, dim, index); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor selu_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::selu::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & selu__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::selu_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor celu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::celu::call(self, alpha);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & celu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::celu_::call(self, alpha);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, alpha);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor silu_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::silu::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & silu__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::silu_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
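+// NOTE [ generated vmap plumbing pattern ]
+// Every *_generated_plumbing wrapper in this file follows the same shape:
+// exclude DispatchKey::FuncTorchBatched, look up the current dynamic vmap
+// layer, fall back to the plain at::_ops call when no argument is batched at
+// this level, otherwise unwrap each tensor into a (value, optional<int64_t>
+// batch-dim) pair, forward everything to the registered batch_rule, and
+// re-wrap the rule's (tensor, batch-dim) outputs with makeBatched or
+// makeBatchedVector.
+//
+// A minimal sketch of a batch rule that such a wrapper could be instantiated
+// with is shown below. It is illustrative only: it assumes a pointwise op,
+// and the real rules registered by functorch may differ.
+//
+//   std::tuple<at::Tensor, c10::optional<int64_t>> silu_batch_rule(
+//       const at::Tensor& self, c10::optional<int64_t> self_bdim) {
+//     // A pointwise op needs no batch-dim bookkeeping: apply the op to the
+//     // physical tensor and report the batch dimension unchanged.
+//     return std::make_tuple(at::silu(self), self_bdim);
+//   }
+//
+//   // Instantiating the wrapper above with this rule would read:
+//   //   silu_generated_plumbing<decltype(&silu_batch_rule), &silu_batch_rule>(x);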
+template +at::Tensor silu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::silu_backward::call(grad_output, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mish_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mish::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & mish__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mish_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor mish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::mish_backward::call(grad_output, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sigmoid_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sigmoid::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, 
cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sigmoid__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sigmoid_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor logit_generated_plumbing(const at::Tensor & self, c10::optional eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logit::call(self, eps); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & logit__generated_plumbing(at::Tensor & self, c10::optional eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logit_::call(self, eps); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, eps); + return self; +} +template +at::Tensor sin_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sin::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sin__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sin_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor sinc_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sinc::call(self); + } + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sinc__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sinc_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor sinh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sinh::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sinh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sinh_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor detach_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::detach::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::slice_Tensor::call(self, dim, start, end, step); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, 
c10::SymInt end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_inverse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::slice_inverse::call(self, src, dim, start, end, step); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::slice_scatter::call(self, src, dim, start, end, step); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor select_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::select_scatter::call(self, src, dim, index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, index); + return makeBatched(std::get<0>(results), 
std::get<1>(results), cur_level); +} +template +at::Tensor diagonal_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor as_strided_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, size, stride, storage_offset); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor smm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::smm::call(self, mat2); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::softmax_int::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::softmax_Dimname::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_softmax::call(self, dim, half_to_float); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, half_to_float); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unsafe_split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unsafe_split_Tensor::call(self, split_size, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_size, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_Tensor::call(self, split_size, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_size, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_sizes::call(self, split_size, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_size, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unsafe_split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_sizes, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_with_sizes::call(self, split_sizes, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_sizes, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector hsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hsplit_int::call(self, sections); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sections); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector hsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hsplit_array::call(self, indices); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector vsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::vsplit_int::call(self, sections); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sections); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector vsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::vsplit_array::call(self, indices); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector dsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::dsplit_int::call(self, sections); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sections); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector dsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::dsplit_array::call(self, indices); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); 
+ if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_dim_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_dim::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_dimname::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_dims::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sspaddmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat1_value; + optional mat1_bdim; + std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
_chunk_cat_generated_plumbing(at::TensorList tensors, int64_t dim, int64_t num_chunks) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::_chunk_cat::call(tensors, dim, num_chunks); + } + + auto results = batch_rule(tensors, dim, num_chunks); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor stack_generated_plumbing(at::TensorList tensors, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::stack::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _stack_generated_plumbing(at::TensorList tensors, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::_stack::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hstack_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::hstack::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor vstack_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::vstack::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor dstack_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::dstack::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor stft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool normalized, c10::optional onesided, c10::optional return_complex) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) { + return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional window_value; + optional window_bdim; + if (window) { + std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, normalized, onesided, return_complex); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor stft_center_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional onesided, c10::optional return_complex) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) { + return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional window_value; + optional window_bdim; + if (window) { + std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, pad_mode, normalized, onesided, return_complex); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor istft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool center, bool normalized, c10::optional onesided, c10::optional length, bool return_complex) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) { + return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional window_value; + optional window_bdim; + if (window) { + std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, normalized, onesided, length, return_complex); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sum_generated_plumbing(const at::Tensor & self, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sum::call(self, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sum_dim_IntList_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sum_dim_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nansum_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nansum::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sum_to_size_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sum_to_size::call(self, size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sqrt_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sqrt::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sqrt__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sqrt_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor square_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::square::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & square__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::square_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor std_generated_plumbing(const at::Tensor & self, bool unbiased) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std::call(self, 
unbiased); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, unbiased); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor std_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_dim::call(self, dim, unbiased, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor std_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_correction::call(self, dim, correction, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple std_mean_generated_plumbing(const at::Tensor & self, bool unbiased) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_mean::call(self, unbiased); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, unbiased); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple std_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_mean_dim::call(self, dim, unbiased, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple std_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & 
correction, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_mean_correction::call(self, dim, correction, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple std_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_mean_names_dim::call(self, dim, unbiased, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple std_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const c10::optional & correction, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_mean_correction_names::call(self, dim, correction, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor std_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor std_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const c10::optional & correction, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::std_correction_names::call(self, dim, correction, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor prod_generated_plumbing(const at::Tensor & self, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::prod::call(self, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor prod_dim_int_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::prod_dim_int::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor prod_dim_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::prod_dim_Dimname::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor t_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::t::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor tan_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + 
if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tan::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & tan__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tan_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor tanh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tanh::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & tanh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tanh_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor tensordot_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::tensordot::call(self, other, dims_self, dims_other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims_self, dims_other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor threshold_generated_plumbing(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::threshold::call(self, threshold, value); + } + Tensor self_value; + optional 
self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, threshold, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & threshold__generated_plumbing(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::threshold_::call(self, threshold, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, threshold, value); + return self; +} +template +at::Tensor threshold_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::threshold_backward::call(grad_output, self, threshold); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, threshold); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor tile_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tile::call(self, dims); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor transpose_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::transpose_int::call(self, dim0, dim1); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor transpose_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::transpose_Dimname::call(self, dim0, dim1); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mkldnn_transpose_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_mkldnn_transpose::call(self, dim0, dim1); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _mkldnn_transpose__generated_plumbing(at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dim0, dim1); + return self; +} +template +at::Tensor one_hot_generated_plumbing(const at::Tensor & self, int64_t num_classes) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::one_hot::call(self, num_classes); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, num_classes); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flip_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flip::call(self, dims); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fliplr_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fliplr::call(self); + } + Tensor self_value; + 
optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flipud_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flipud::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor roll_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::roll::call(self, shifts, dims); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, shifts, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rot90_generated_plumbing(const at::Tensor & self, int64_t k, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rot90::call(self, k, dims); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, k, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) { + return at::_ops::trapezoid_x::call(y, x, dim); + } + Tensor y_value; + optional y_bdim; + std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level); + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level)) { + return at::_ops::trapezoid_dx::call(y, dx, dim); + } + Tensor y_value; + optional y_bdim; + 
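+// unwrapTensorAtLevel peels one vmap level off a possibly-batched tensor, returning the
+// underlying value together with the index of its batch dimension; the bdim comes back
+// as an empty optional when this particular argument is not batched at cur_level, which
+// can happen for multi-argument ops since the batched path is taken as soon as any one
+// argument is batched at the current level.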
std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level); + auto results = batch_rule(y_value, y_bdim, dx, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trapz_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) { + return at::_ops::trapz_x::call(y, x, dim); + } + Tensor y_value; + optional y_bdim; + std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level); + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trapz_dx_generated_plumbing(const at::Tensor & y, double dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level)) { + return at::_ops::trapz_dx::call(y, dx, dim); + } + Tensor y_value; + optional y_bdim; + std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level); + auto results = batch_rule(y_value, y_bdim, dx, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _transform_bias_rescale_qkv_generated_plumbing(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(qkv, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level)) { + return at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads); + } + Tensor qkv_value; + optional qkv_bdim; + std::tie(qkv_value, qkv_bdim) = unwrapTensorAtLevel(qkv, cur_level); + Tensor qkv_bias_value; + optional qkv_bias_bdim; + std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level); + auto results = batch_rule(qkv_value, qkv_bdim, qkv_bias_value, qkv_bias_bdim, num_heads); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _nested_tensor_from_mask_generated_plumbing(const at::Tensor & t, const at::Tensor & mask, bool mask_check) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(t, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check); + } + Tensor t_value; + optional t_bdim; + std::tie(t_value, t_bdim) = unwrapTensorAtLevel(t, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(t_value, t_bdim, 
mask_value, mask_bdim, mask_check); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_from_padded_generated_plumbing(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(cpu_nested_shape_example, cur_level)) { + return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213); + } + Tensor padded_value; + optional padded_bdim; + std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level); + Tensor cpu_nested_shape_example_value; + optional cpu_nested_shape_example_bdim; + std::tie(cpu_nested_shape_example_value, cpu_nested_shape_example_bdim) = unwrapTensorAtLevel(cpu_nested_shape_example, cur_level); + auto results = batch_rule(padded_value, padded_bdim, cpu_nested_shape_example_value, cpu_nested_shape_example_bdim, fuse_transform_0213); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_size_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_tensor_size::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_strides_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_tensor_strides::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_storage_offsets_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_tensor_storage_offsets::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_from_padded_and_nested_example_generated_plumbing(const at::Tensor & padded, const at::Tensor & nt_example) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(nt_example, cur_level)) { + return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example); + } + Tensor padded_value; + optional padded_bdim; + std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level); + Tensor nt_example_value; + optional nt_example_bdim; + std::tie(nt_example_value, nt_example_bdim) = unwrapTensorAtLevel(nt_example, cur_level); + auto results = batch_rule(padded_value, padded_bdim, nt_example_value, nt_example_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_view_from_buffer_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor nested_size_value; + optional nested_size_bdim; + std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level); + Tensor nested_strides_value; + optional nested_strides_bdim; + std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets_value, offsets_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_view_from_buffer_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor nested_size_value; + optional nested_size_bdim; + std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level); + Tensor nested_strides_value; + optional nested_strides_bdim; + std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + auto results = 
batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets_value, offsets_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_view_from_jagged_generated_plumbing(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(lengths, cur_level)) { + return at::_ops::_nested_view_from_jagged::call(self, offsets, dummy, lengths, ragged_idx); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + Tensor dummy_value; + optional dummy_bdim; + std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level); + optional lengths_value; + optional lengths_bdim; + if (lengths) { + std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, lengths_value, lengths_bdim, ragged_idx); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_view_from_jagged_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(lengths, cur_level)) { + return at::_ops::_nested_view_from_jagged_copy::call(self, offsets, dummy, lengths, ragged_idx); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + Tensor dummy_value; + optional dummy_bdim; + std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level); + optional lengths_value; + optional lengths_bdim; + if (lengths) { + std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, lengths_value, lengths_bdim, ragged_idx); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_values_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_values::call(self); + } + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_values_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_values_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_offsets_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_offsets::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_lengths_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_lengths::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_jagged_dummy_generated_plumbing(const at::Tensor & any) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(any, cur_level)) { + return at::_ops::_nested_get_jagged_dummy::call(any); + } + Tensor any_value; + optional any_bdim; + std::tie(any_value, any_bdim) = unwrapTensorAtLevel(any, cur_level); + auto results = batch_rule(any_value, any_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _trilinear_generated_plumbing(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(i1, cur_level) && !isBatchedAtLevel(i2, cur_level) && !isBatchedAtLevel(i3, cur_level)) { + return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, 
expand3, sumdim, unroll_dim); + } + Tensor i1_value; + optional i1_bdim; + std::tie(i1_value, i1_bdim) = unwrapTensorAtLevel(i1, cur_level); + Tensor i2_value; + optional i2_bdim; + std::tie(i2_value, i2_bdim) = unwrapTensorAtLevel(i2, cur_level); + Tensor i3_value; + optional i3_bdim; + std::tie(i3_value, i3_bdim) = unwrapTensorAtLevel(i3, cur_level); + auto results = batch_rule(i1_value, i1_bdim, i2_value, i2_bdim, i3_value, i3_bdim, expand1, expand2, expand3, sumdim, unroll_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor triplet_margin_loss_generated_plumbing(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(anchor, cur_level) && !isBatchedAtLevel(positive, cur_level) && !isBatchedAtLevel(negative, cur_level)) { + return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction); + } + Tensor anchor_value; + optional anchor_bdim; + std::tie(anchor_value, anchor_bdim) = unwrapTensorAtLevel(anchor, cur_level); + Tensor positive_value; + optional positive_bdim; + std::tie(positive_value, positive_bdim) = unwrapTensorAtLevel(positive, cur_level); + Tensor negative_value; + optional negative_bdim; + std::tie(negative_value, negative_bdim) = unwrapTensorAtLevel(negative, cur_level); + auto results = batch_rule(anchor_value, anchor_bdim, positive_value, positive_bdim, negative_value, negative_bdim, margin, p, eps, swap, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trunc_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::trunc::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & trunc__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::trunc_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor fix_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fix::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & fix__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fix_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor type_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::type_as::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _unique_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_unique::call(self, sorted, return_inverse); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sorted, return_inverse); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple unique_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, sorted, return_inverse, return_counts); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple unique_consecutive_generated_plumbing(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional dim) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, return_inverse, return_counts, dim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple unique_dim_consecutive_generated_plumbing(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, return_inverse, return_counts); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _unique2_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sorted, return_inverse, return_counts); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _unsafe_view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_unsafe_view::call(self, size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor unsqueeze_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
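+// vmap_check_escaped appears to guard against batched tensors that outlive the vmap
+// dynamic layer that created them: if maybeCurrentDynamicLayer() found no active layer,
+// the check raises instead of letting the next line dereference maybe_layer, and the
+// string argument simply names the generated entry point for the error message.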
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unsqueeze::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor vander_generated_plumbing(const at::Tensor & x, c10::optional N, bool increasing) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::vander::call(x, N, increasing); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, N, increasing); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor var_generated_plumbing(const at::Tensor & self, bool unbiased) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var::call(self, unbiased); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, unbiased); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor var_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_dim::call(self, dim, unbiased, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor var_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_correction::call(self, dim, correction, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor var_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor var_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const c10::optional & correction, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_correction_names::call(self, dim, correction, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple var_mean_generated_plumbing(const at::Tensor & self, bool unbiased) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_mean::call(self, unbiased); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, unbiased); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple var_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_mean_dim::call(self, dim, unbiased, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple var_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_mean_correction::call(self, dim, correction, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto 
results = batch_rule(self_value, self_bdim, dim, correction, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple var_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_mean_names_dim::call(self, dim, unbiased, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple var_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const c10::optional & correction, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::var_mean_correction_names::call(self, dim, correction, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor view_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::view_as::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor where_self_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::where_self::call(condition, self, other); + } + Tensor condition_value; + optional condition_bdim; + std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level); + Tensor self_value; + optional 
self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor where_ScalarSelf_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::where_ScalarSelf::call(condition, self, other); + } + Tensor condition_value; + optional condition_bdim; + std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(condition_value, condition_bdim, self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor where_ScalarOther_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::where_ScalarOther::call(condition, self, other); + } + Tensor condition_value; + optional condition_bdim; + std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor where_Scalar_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(condition, cur_level)) { + return at::_ops::where_Scalar::call(condition, self, other); + } + Tensor condition_value; + optional condition_bdim; + std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level); + auto results = batch_rule(condition_value, condition_bdim, self, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector where_generated_plumbing(const at::Tensor & condition) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(condition, cur_level)) { + return at::_ops::where::call(condition); + } + Tensor condition_value; + optional condition_bdim; + 
std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level); + auto results = batch_rule(condition_value, condition_bdim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor norm_except_dim_generated_plumbing(const at::Tensor & v, int64_t pow, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(v, cur_level)) { + return at::_ops::norm_except_dim::call(v, pow, dim); + } + Tensor v_value; + optional v_bdim; + std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level); + auto results = batch_rule(v_value, v_bdim, pow, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _weight_norm_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) { + return at::_ops::_weight_norm::call(v, g, dim); + } + Tensor v_value; + optional v_bdim; + std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level); + Tensor g_value; + optional g_bdim; + std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level); + auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _weight_norm_interface_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) { + return at::_ops::_weight_norm_interface::call(v, g, dim); + } + Tensor v_value; + optional v_bdim; + std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level); + Tensor g_value; + optional g_bdim; + std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level); + auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _weight_norm_interface_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) { + return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim); + } + Tensor grad_w_value; + optional grad_w_bdim; + std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level); + Tensor saved_v_value; + optional saved_v_bdim; + 
std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level); + Tensor saved_g_value; + optional saved_g_bdim; + std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level); + Tensor saved_norms_value; + optional saved_norms_bdim; + std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level); + auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _weight_norm_differentiable_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) { + return at::_ops::_weight_norm_differentiable_backward::call(grad_w, saved_v, saved_g, saved_norms, dim); + } + Tensor grad_w_value; + optional grad_w_bdim; + std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level); + Tensor saved_v_value; + optional saved_v_bdim; + std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level); + Tensor saved_g_value; + optional saved_g_bdim; + std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level); + Tensor saved_norms_value; + optional saved_norms_bdim; + std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level); + auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor zeros_like_generated_plumbing(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _standard_gamma_grad_generated_plumbing(const at::Tensor & self, const at::Tensor & output) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(output, 
cur_level)) { + return at::_ops::_standard_gamma_grad::call(self, output); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + auto results = batch_rule(self_value, self_bdim, output_value, output_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _standard_gamma_generated_plumbing(const at::Tensor & self, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_standard_gamma::call(self, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _dirichlet_grad_generated_plumbing(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(alpha, cur_level) && !isBatchedAtLevel(total, cur_level)) { + return at::_ops::_dirichlet_grad::call(x, alpha, total); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor alpha_value; + optional alpha_bdim; + std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha, cur_level); + Tensor total_value; + optional total_bdim; + std::tie(total_value, total_bdim) = unwrapTensorAtLevel(total, cur_level); + auto results = batch_rule(x_value, x_bdim, alpha_value, alpha_bdim, total_value, total_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sample_dirichlet_generated_plumbing(const at::Tensor & self, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sample_dirichlet::call(self, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor poisson_generated_plumbing(const at::Tensor & self, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::poisson::call(self, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, 
generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor binomial_generated_plumbing(const at::Tensor & count, const at::Tensor & prob, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(count, cur_level) && !isBatchedAtLevel(prob, cur_level)) { + return at::_ops::binomial::call(count, prob, generator); + } + Tensor count_value; + optional count_bdim; + std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level); + Tensor prob_value; + optional prob_bdim; + std::tie(prob_value, prob_bdim) = unwrapTensorAtLevel(prob, cur_level); + auto results = batch_rule(count_value, count_bdim, prob_value, prob_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor native_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::native_norm::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor native_norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_sum_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_sum::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_sum_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::_sparse_sum_dtype::call(self, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_sum_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_sum_dim::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_sum_dim_dtype::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_sum_backward::call(grad, self, dim); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_csr_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_csr_prod_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional dtype) 
{ + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_softmax_int::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_softmax::call(self, dim, half_to_float); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, half_to_float); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = 
unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, half_to_float); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, 
output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _spdiags_generated_plumbing(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional layout) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(diagonals, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::_spdiags::call(diagonals, offsets, shape, layout); + } + Tensor diagonals_value; + optional diagonals_bdim; + std::tie(diagonals_value, diagonals_bdim) = unwrapTensorAtLevel(diagonals, cur_level); + Tensor offsets_value; + optional offsets_bdim; + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level); + auto results = batch_rule(diagonals_value, diagonals_bdim, offsets_value, offsets_bdim, shape, layout); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor norm_ScalarOpt_dtype_generated_plumbing(const at::Tensor & self, const c10::optional & p, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor norm_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::norm_Scalar::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), 
std::get<1>(results), cur_level); +} +template +at::Tensor norm_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::norm_ScalarOpt_dim::call(self, p, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor norm_names_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::norm_names_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor norm_names_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::norm_names_ScalarOpt_dim::call(self, p, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple frexp_Tensor_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::frexp_Tensor::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor frobenius_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::frobenius_norm_dim::call(self, 
dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nuclear_norm_generated_plumbing(const at::Tensor & self, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nuclear_norm::call(self, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nuclear_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nuclear_norm_dim::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor clone_generated_plumbing(const at::Tensor & self, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::clone::call(self, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor positive_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::positive::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +const at::Tensor & resize_as_sparse__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) { + return at::_ops::resize_as_sparse_::call(self, the_template); + } + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor the_template_value; + optional the_template_bdim; + std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level); + batch_rule(self_value, self_bdim, the_template_value, the_template_bdim); + return self; +} +template +at::Tensor & zero__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::zero_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor sub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::sub_Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sub__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::sub__Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return self; +} +template +at::Tensor sub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sub_Scalar::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sub__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sub__Scalar::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other, alpha); + return self; +} +template +at::Tensor subtract_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::subtract_Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & subtract__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::subtract__Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return self; +} +template +at::Tensor subtract_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::subtract_Scalar::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & subtract__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::subtract__Scalar::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other, alpha); + return self; +} +template +at::Tensor rsub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::rsub_Tensor::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor heaviside_generated_plumbing(const at::Tensor & self, const at::Tensor & values) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::heaviside::call(self, values); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(self_value, self_bdim, values_value, values_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & heaviside__generated_plumbing(at::Tensor & self, const at::Tensor & values) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::heaviside_::call(self, values); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(self_value, self_bdim, values_value, values_bdim); + return self; +} +template +at::Tensor rsub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rsub_Scalar::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const 
at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat1_value; + optional mat1_bdim; + std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_sampled_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat1_value; + optional mat1_bdim; + std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _sparse_mm_reduce_impl_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, reduce); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _sparse_mm_reduce_impl_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(arg_out, cur_level)) { + return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + Tensor arg_out_value; + optional arg_out_bdim; + std::tie(arg_out_value, arg_out_bdim) = unwrapTensorAtLevel(arg_out, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_out_value, grad_out_bdim, weight_value, weight_bdim, reduce, arg_out_value, arg_out_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::addmm::call(self, mat1, mat2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat1_value; + optional mat1_bdim; + std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & addmm__generated_plumbing(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::addmm_::call(self, mat1, mat2, beta, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat1_value; + optional mat1_bdim; + std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha); + return self; +} +template +at::Tensor _addmm_activation_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const 
at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat1_value; + optional mat1_bdim; + std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha, use_gelu); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _scaled_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const c10::optional & bias, c10::optional out_dtype, const c10::optional & scale_a, const c10::optional & scale_b, const c10::optional & scale_result, bool use_fast_accum) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(scale_a, cur_level) && !isBatchedAtLevel(scale_b, cur_level) && !isBatchedAtLevel(scale_result, cur_level)) { + return at::_ops::_scaled_mm::call(self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + optional scale_a_value; + optional scale_a_bdim; + if (scale_a) { + std::tie(scale_a_value, scale_a_bdim) = unwrapTensorAtLevel(scale_a.value(), cur_level); + } + optional scale_b_value; + optional scale_b_bdim; + if (scale_b) { + std::tie(scale_b_value, scale_b_bdim) = unwrapTensorAtLevel(scale_b.value(), cur_level); + } + optional scale_result_value; + optional scale_result_bdim; + if (scale_result) { + std::tie(scale_result_value, scale_result_bdim) = unwrapTensorAtLevel(scale_result.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, bias_value, bias_bdim, out_dtype, scale_a_value, scale_a_bdim, scale_b_value, scale_b_bdim, scale_result_value, scale_result_bdim, use_fast_accum); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor sparse_compressed_tensor_comp_plain_value_size_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor compressed_indices_value; + optional compressed_indices_bdim; + std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level); + Tensor plain_indices_value; + optional plain_indices_bdim; + std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_csr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_csc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor ccol_indices_value; + optional 
ccol_indices_bdim; + std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level); + Tensor row_indices_value; + optional row_indices_bdim; + std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_bsr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_bsc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor ccol_indices_value; + optional ccol_indices_bdim; + std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level); + Tensor row_indices_value; + optional row_indices_bdim; + std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_compressed_tensor_comp_plain_value_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory); + } + Tensor compressed_indices_value; + optional compressed_indices_bdim; + std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level); + Tensor plain_indices_value; + optional plain_indices_bdim; + std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_csr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_csc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory); + } + Tensor ccol_indices_value; + optional ccol_indices_bdim; + std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level); + Tensor row_indices_value; + optional row_indices_bdim; + std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_bsr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_bsc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory); + } + Tensor ccol_indices_value; + optional ccol_indices_bdim; + std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level); + Tensor row_indices_value; + optional row_indices_bdim; + std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = 
unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_compressed_tensor_unsafe_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor compressed_indices_value; + optional compressed_indices_bdim; + std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level); + Tensor plain_indices_value; + optional plain_indices_bdim; + std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_csr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_csc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, 
c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor ccol_indices_value; + optional ccol_indices_bdim; + std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level); + Tensor row_indices_value; + optional row_indices_bdim; + std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_bsr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_bsc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + Tensor ccol_indices_value; + optional ccol_indices_bdim; + std::tie(ccol_indices_value, ccol_indices_bdim) = 
unwrapTensorAtLevel(ccol_indices, cur_level); + Tensor row_indices_value; + optional row_indices_bdim; + std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_coo_tensor_indices_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_coo_tensor_indices::call(indices, values, dtype, layout, device, pin_memory, is_coalesced); + } + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory, is_coalesced); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_coo_tensor_indices_size_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced); + } + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory, is_coalesced); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_coo_tensor_unsafe_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return 
at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced); + } + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory, is_coalesced); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _validate_sparse_coo_tensor_args_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional is_coalesced) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_validate_sparse_coo_tensor_args::call(indices, values, size, is_coalesced); + } + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, is_coalesced); +} +template +void _validate_sparse_compressed_tensor_args_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_validate_sparse_compressed_tensor_args::call(compressed_indices, plain_indices, values, size, layout); + } + Tensor compressed_indices_value; + optional compressed_indices_bdim; + std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level); + Tensor plain_indices_value; + optional plain_indices_bdim; + std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, layout); +} +template +void _validate_sparse_csr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_validate_sparse_csr_tensor_args::call(crow_indices, col_indices, values, 
size); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size); +} +template +void _validate_sparse_csc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size); + } + Tensor ccol_indices_value; + optional ccol_indices_bdim; + std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level); + Tensor row_indices_value; + optional row_indices_bdim; + std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size); +} +template +void _validate_sparse_bsr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size); +} +template +void _validate_sparse_bsc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && 
!isBatchedAtLevel(values, cur_level)) { + return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size); + } + Tensor ccol_indices_value; + optional ccol_indices_bdim; + std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level); + Tensor row_indices_value; + optional row_indices_bdim; + std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size); +} +template +at::Tensor _sparse_coo_tensor_with_dims_and_tensors_generated_plumbing(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced); + } + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(sparse_dim, dense_dim, size, indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory, is_coalesced); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +const at::Tensor & sparse_resize__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sparse_resize_::call(self, size, sparse_dim, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim); + return self; +} +template +const at::Tensor & sparse_resize_and_clear__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sparse_resize_and_clear_::call(self, size, sparse_dim, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim); + return self; +} +template +at::Tensor sparse_mask_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) 
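// [Editorial note, not part of the generated header] Every *_generated_plumbing function in
// this file follows the same machine-generated shape: exclude the FuncTorchBatched dispatch
// key, look up the current vmap layer, fall through to the plain at::_ops call when nothing is
// batched at that level, otherwise unwrap each tensor into a (value, batch-dim) pair, invoke
// the batch rule, and re-wrap the results. A minimal hand-written sketch of that shape is
// given below; it assumes the helpers declared earlier in this header
// (maybeCurrentDynamicLayer, isBatchedAtLevel, unwrapTensorAtLevel, makeBatched), assumes the
// template parameters stripped by extraction are the usual <typename batch_rule_t,
// batch_rule_t batch_rule> pair, and uses a hypothetical op "example" purely for illustration:
//
//   template <typename batch_rule_t, batch_rule_t batch_rule>
//   at::Tensor example_generated_plumbing(const at::Tensor & self) {
//     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
//     auto maybe_layer = maybeCurrentDynamicLayer();
//     vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
//     int64_t cur_level = maybe_layer->layerId();
//     if (!isBatchedAtLevel(self, cur_level)) {
//       return at::_ops::example::call(self);            // hypothetical op, illustration only
//     }
//     Tensor self_value;
//     optional<int64_t> self_bdim;                       // nullopt when this input has no batch dim
//     std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
//     auto results = batch_rule(self_value, self_bdim);  // yields (tensor, optional batch dim)
//     return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
//   }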
{ + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::sparse_mask::call(self, mask); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_mask_projection_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_sparse_mask_projection::call(self, mask, accumulate_matches); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, accumulate_matches); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _to_cpu_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::_to_cpu::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_dense_generated_plumbing(const at::Tensor & self, c10::optional dtype, c10::optional masked_grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_dense::call(self, dtype, masked_grad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, masked_grad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_dense_generated_plumbing(const at::Tensor & self, c10::optional dtype, c10::optional masked_grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_dense::call(self, dtype, masked_grad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = 
batch_rule(self_value, self_bdim, dtype, masked_grad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, c10::optional masked_grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) { + return at::_ops::to_dense_backward::call(grad, input, masked_grad); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, masked_grad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor coalesce_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::coalesce::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _coalesce_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_coalesce::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _indices_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_indices::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _values_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_values::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + 
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _coalesced__generated_plumbing(at::Tensor & self, bool coalesced) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_coalesced_::call(self, coalesced); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, coalesced); + return self; +} +template +at::Tensor indices_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::indices::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor values_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::values::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor crow_indices_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::crow_indices::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor col_indices_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::col_indices::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ccol_indices_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ccol_indices::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor row_indices_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::row_indices::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hspmm_generated_plumbing(const at::Tensor & mat1, const at::Tensor & mat2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::hspmm::call(mat1, mat2); + } + Tensor mat1_value; + optional mat1_bdim; + std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level); + Tensor mat2_value; + optional mat2_bdim; + std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(mat1_value, mat1_bdim, mat2_value, mat2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & copy_sparse_to_sparse__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::copy_sparse_to_sparse_::call(self, src, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking); + return self; +} +template +::std::vector unbind_int_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unbind_int::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unbind_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); 
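// [Editorial note, not part of the generated header] Two variants of the pattern appear around
// here. Ops that return a vector of Tensors (e.g. _to_cpu, unbind_int, unbind_Dimname) re-wrap
// through makeBatchedVector instead of makeBatched, and in-place ops such as _coalesced_ or
// copy_sparse_to_sparse_ call the batch rule for its effect on the unwrapped value and then
// return `self`, under the "gen_vmap_inplace_plumbing" escape check (void-returning validators
// use "gen_vmap_plumbing_no_returns" and simply return). A sketch of the in-place shape, under
// the same assumptions as the note near sparse_mask, with a hypothetical op "example_":
//
//   template <typename batch_rule_t, batch_rule_t batch_rule>
//   at::Tensor & example__generated_plumbing(at::Tensor & self, bool flag) {
//     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
//     auto maybe_layer = maybeCurrentDynamicLayer();
//     vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
//     int64_t cur_level = maybe_layer->layerId();
//     if (!isBatchedAtLevel(self, cur_level)) {
//       return at::_ops::example_::call(self, flag);   // hypothetical in-place op
//     }
//     Tensor self_value;
//     optional<int64_t> self_bdim;
//     std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
//     batch_rule(self_value, self_bdim, flag);         // in-place update on the unwrapped value
//     return self;                                     // the original wrapper is returned
//   }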
+ auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unbind_Dimname::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse_sparse_dim::call(self, sparse_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sparse_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse_sparse_dim::call(self, sparse_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sparse_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_generated_plumbing(const at::Tensor & self, c10::optional layout, at::OptionalIntArrayRef blocksize, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse::call(self, layout, blocksize, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_generated_plumbing(const at::Tensor & self, c10::optional layout, at::OptionalIntArrayRef blocksize, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse::call(self, layout, blocksize, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
to_sparse_csr_generated_plumbing(const at::Tensor & self, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse_csr::call(self, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_csr_generated_plumbing(const at::Tensor & self, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse_csr::call(self, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_csc_generated_plumbing(const at::Tensor & self, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse_csc::call(self, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_csc_generated_plumbing(const at::Tensor & self, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse_csc::call(self, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse_bsr::call(self, blocksize, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
_to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse_bsr::call(self, blocksize, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse_bsc::call(self, blocksize, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse_bsc::call(self, blocksize, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _to_sparse_semi_structured_generated_plumbing(const at::Tensor & dense) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dense, cur_level)) { + return at::_ops::_to_sparse_semi_structured::call(dense); + } + Tensor dense_value; + optional dense_bdim; + std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level); + auto results = batch_rule(dense_value, dense_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor to_mkldnn_generated_plumbing(const at::Tensor & self, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_mkldnn::call(self, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = 
batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_reorder_conv2d_weight_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_reorder_conv3d_weight_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_mkldnn_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) { + return at::_ops::to_mkldnn_backward::call(grad, input); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantize_per_tensor_dynamic_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, reduce_range); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
+at::Tensor quantize_per_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, scale, zero_point, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantize_per_tensor_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector quantize_per_tensor_tensors_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) { + return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype); + } + Tensor scales_value; + optional scales_bdim; + std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level); + Tensor zero_points_value; + optional zero_points_bdim; + std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level); + auto results = batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantize_per_channel_generated_plumbing(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && 
!isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) { + return at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scales_value; + optional scales_bdim; + std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level); + Tensor zero_points_value; + optional zero_points_bdim; + std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level); + auto results = batch_rule(self_value, self_bdim, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor dequantize_self_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::dequantize_self::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector dequantize_tensors_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::dequantize_tensors::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor q_per_channel_scales_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::q_per_channel_scales::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor q_per_channel_zero_points_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::q_per_channel_zero_points::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor int_repr_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::int_repr::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _make_per_tensor_quantized_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, scale, zero_point); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _make_per_channel_quantized_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fake_quantize_per_tensor_affine_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fake_quantize_per_tensor_affine_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple fake_quantize_per_tensor_affine_cachemask_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level) && !isBatchedAtLevel(fake_quant_enabled, cur_level)) { + return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + Tensor fake_quant_enabled_value; + optional fake_quant_enabled_bdim; + std::tie(fake_quant_enabled_value, fake_quant_enabled_bdim) = unwrapTensorAtLevel(fake_quant_enabled, cur_level); + auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, fake_quant_enabled_value, fake_quant_enabled_bdim, quant_min, quant_max); + return 
std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor fake_quantize_per_tensor_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::call(grad, mask); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _fake_quantize_learnable_per_tensor_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _fake_quantize_learnable_per_tensor_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = 
unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor fake_quantize_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple fake_quantize_per_channel_affine_cachemask_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor fake_quantize_per_channel_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::call(grad, mask); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _fake_quantize_learnable_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _fake_quantize_learnable_per_channel_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor); + return 
std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _saturate_weight_to_fp16_generated_plumbing(const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_saturate_weight_to_fp16::call(weight); + } + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(weight_value, weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple choose_qparams_optimized_generated_plumbing(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, numel, n_bins, ratio, bit_width); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _autocast_to_reduced_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_autocast_to_reduced_precision::call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _autocast_to_full_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_autocast_to_full_precision::call(self, cuda_enabled, cpu_enabled); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_copy_generated_plumbing(const at::Tensor & self, c10::optional 
dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_dtype_layout_generated_plumbing(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, bool copy, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_dtype_layout::call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, copy, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_device_generated_plumbing(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_device::call(self, device, dtype, non_blocking, copy, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, device, dtype, non_blocking, copy, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_dtype::call(self, dtype, non_blocking, copy, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, non_blocking, copy, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, 
c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::to_other::call(self, other, non_blocking, copy, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, non_blocking, copy, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector meshgrid_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::meshgrid::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector meshgrid_indexing_generated_plumbing(at::TensorList tensors, c10::string_view indexing) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::meshgrid_indexing::call(tensors, indexing); + } + + auto results = batch_rule(tensors, indexing); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cartesian_prod_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::cartesian_prod::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor combinations_generated_plumbing(const at::Tensor & self, int64_t r, bool with_replacement) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::combinations::call(self, r, with_replacement); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, r, with_replacement); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _lstm_mps_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level)); +} +template +::std::tuple,::std::vector> lstm_mps_backward_generated_plumbing(const c10::optional & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + Tensor z_state_value; + optional z_state_bdim; + std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level); + Tensor cell_state_fwd_value; + optional cell_state_fwd_bdim; + std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor layersOutputs_value; + optional layersOutputs_bdim; + std::tie(layersOutputs_value, layersOutputs_bdim) = unwrapTensorAtLevel(layersOutputs, cur_level); + optional grad_y_value; + optional grad_y_bdim; + if (grad_y) { + std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y.value(), cur_level); + } + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + auto results = batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, 
cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _thnn_fused_lstm_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional & input_bias, const c10::optional & hidden_bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) { + return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias); + } + Tensor input_gates_value; + optional input_gates_bdim; + std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level); + Tensor hidden_gates_value; + optional hidden_gates_bdim; + std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level); + Tensor cx_value; + optional cx_bdim; + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level); + optional input_bias_value; + optional input_bias_bdim; + if (input_bias) { + std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level); + } + optional hidden_bias_value; + optional hidden_bias_bdim; + if (hidden_bias) { + std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level); + } + auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, cx_value, cx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _thnn_fused_lstm_cell_backward_impl_generated_plumbing(const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) { + return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias); + } + Tensor cx_value; + optional cx_bdim; + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level); + Tensor cy_value; + optional cy_bdim; + std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level); + Tensor workspace_value; + optional workspace_bdim; + std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level); + optional 
grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _thnn_fused_lstm_cell_backward_generated_plumbing(const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) { + return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias); + } + Tensor cx_value; + optional cx_bdim; + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level); + Tensor cy_value; + optional cy_bdim; + std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level); + Tensor workspace_value; + optional workspace_bdim; + std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level); + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple _thnn_differentiable_lstm_cell_backward_generated_plumbing(const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional & input_bias, const c10::optional & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level) && !isBatchedAtLevel(cx, cur_level) 
&& !isBatchedAtLevel(cy, cur_level)) { + return at::_ops::_thnn_differentiable_lstm_cell_backward::call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy); + } + Tensor input_gates_value; + optional input_gates_bdim; + std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level); + Tensor hidden_gates_value; + optional hidden_gates_bdim; + std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level); + Tensor cx_value; + optional cx_bdim; + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level); + Tensor cy_value; + optional cy_bdim; + std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level); + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + optional input_bias_value; + optional input_bias_bdim; + if (input_bias) { + std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level); + } + optional hidden_bias_value; + optional hidden_bias_bdim; + if (hidden_bias) { + std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level); + } + auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim, cx_value, cx_bdim, cy_value, cy_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple _thnn_fused_gru_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) { + return at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias); + } + Tensor input_gates_value; + optional input_gates_bdim; + std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level); + Tensor hidden_gates_value; + optional hidden_gates_bdim; + std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + optional input_bias_value; + optional input_bias_bdim; + if (input_bias) { + std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level); + } + optional hidden_bias_value; + optional hidden_bias_bdim; + if (hidden_bias) { + std::tie(hidden_bias_value, hidden_bias_bdim) = 
unwrapTensorAtLevel(hidden_bias.value(), cur_level); + } + auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _thnn_fused_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) { + return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias); + } + Tensor grad_hy_value; + optional grad_hy_bdim; + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level); + Tensor workspace_value; + optional workspace_bdim; + std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level); + auto results = batch_rule(grad_hy_value, grad_hy_bdim, workspace_value, workspace_bdim, has_bias); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple _thnn_differentiable_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) { + return at::_ops::_thnn_differentiable_gru_cell_backward::call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias); + } + Tensor grad_hy_value; + optional grad_hy_bdim; + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level); + Tensor input_gates_value; + optional input_gates_bdim; + std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level); + Tensor hidden_gates_value; + optional hidden_gates_bdim; + std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + optional input_bias_value; + optional input_bias_bdim; + if (input_bias) { + std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level); + } + optional hidden_bias_value; + optional hidden_bias_bdim; + if (hidden_bias) { + std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level); + } + auto results = 
batch_rule(grad_hy_value, grad_hy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple lstm_input_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple lstm_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + Tensor data_value; + optional data_bdim; + std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level); + Tensor batch_sizes_value; + optional batch_sizes_bdim; + std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level); + auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple gru_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple gru_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + Tensor data_value; + optional data_bdim; + std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level); + Tensor batch_sizes_value; + optional batch_sizes_bdim; + std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple rnn_tanh_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), 
makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple rnn_tanh_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + Tensor data_value; + optional data_bdim; + std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level); + Tensor batch_sizes_value; + optional batch_sizes_bdim; + std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple rnn_relu_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::rnn_relu_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple rnn_relu_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) { + return at::_ops::rnn_relu_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, 
train, bidirectional); + } + Tensor data_value; + optional data_bdim; + std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level); + Tensor batch_sizes_value; + optional batch_sizes_bdim; + std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) { + return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor w_ih_value; + optional w_ih_bdim; + std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level); + Tensor w_hh_value; + optional w_hh_bdim; + std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level); + optional b_ih_value; + optional b_ih_bdim; + if (b_ih) { + std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level); + } + optional b_hh_value; + optional b_hh_bdim; + if (b_hh) { + std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) { + return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor w_ih_value; + optional w_ih_bdim; + std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level); + Tensor w_hh_value; + 
optional w_hh_bdim; + std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level); + optional b_ih_value; + optional b_ih_bdim; + if (b_ih) { + std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level); + } + optional b_hh_value; + optional b_hh_bdim; + if (b_hh) { + std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) { + return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor w_ih_value; + optional w_ih_bdim; + std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level); + Tensor w_hh_value; + optional w_hh_bdim; + std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level); + optional b_ih_value; + optional b_ih_bdim; + if (b_ih) { + std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level); + } + optional b_hh_value; + optional b_hh_bdim; + if (b_hh) { + std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) { + return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor w_ih_value; + optional w_ih_bdim; + std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level); + Tensor w_hh_value; + optional w_hh_bdim; + std::tie(w_hh_value, w_hh_bdim) = 
unwrapTensorAtLevel(w_hh, cur_level); + optional b_ih_value; + optional b_ih_bdim; + if (b_ih) { + std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level); + } + optional b_hh_value; + optional b_hh_bdim; + if (b_hh) { + std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple quantized_lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) { + return at::_ops::quantized_lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor w_ih_value; + optional w_ih_bdim; + std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level); + Tensor w_hh_value; + optional w_hh_bdim; + std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level); + Tensor b_ih_value; + optional b_ih_bdim; + std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level); + Tensor b_hh_value; + optional b_hh_bdim; + std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level); + Tensor packed_ih_value; + optional packed_ih_bdim; + std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level); + Tensor packed_hh_value; + optional packed_hh_bdim; + std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level); + Tensor col_offsets_ih_value; + optional col_offsets_ih_bdim; + std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level); + Tensor col_offsets_hh_value; + optional col_offsets_hh_bdim; + std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level); + auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), 
makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor quantized_gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) { + return at::_ops::quantized_gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor w_ih_value; + optional w_ih_bdim; + std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level); + Tensor w_hh_value; + optional w_hh_bdim; + std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level); + Tensor b_ih_value; + optional b_ih_bdim; + std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level); + Tensor b_hh_value; + optional b_hh_bdim; + std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level); + Tensor packed_ih_value; + optional packed_ih_bdim; + std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level); + Tensor packed_hh_value; + optional packed_hh_bdim; + std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level); + Tensor col_offsets_ih_value; + optional col_offsets_ih_bdim; + std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level); + Tensor col_offsets_hh_value; + optional col_offsets_hh_bdim; + std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level); + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & 
zero_point_ih, const at::Scalar & zero_point_hh) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) { + return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor w_ih_value; + optional w_ih_bdim; + std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level); + Tensor w_hh_value; + optional w_hh_bdim; + std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level); + Tensor b_ih_value; + optional b_ih_bdim; + std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level); + Tensor b_hh_value; + optional b_hh_bdim; + std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level); + Tensor packed_ih_value; + optional packed_ih_bdim; + std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level); + Tensor packed_hh_value; + optional packed_hh_bdim; + std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level); + Tensor col_offsets_ih_value; + optional col_offsets_ih_bdim; + std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level); + Tensor col_offsets_hh_value; + optional col_offsets_hh_bdim; + std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level); + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && 
!isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) { + return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor w_ih_value; + optional w_ih_bdim; + std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level); + Tensor w_hh_value; + optional w_hh_bdim; + std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level); + Tensor b_ih_value; + optional b_ih_bdim; + std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level); + Tensor b_hh_value; + optional b_hh_bdim; + std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level); + Tensor packed_ih_value; + optional packed_ih_bdim; + std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level); + Tensor packed_hh_value; + optional packed_hh_bdim; + std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level); + Tensor col_offsets_ih_value; + optional col_offsets_ih_bdim; + std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level); + Tensor col_offsets_hh_value; + optional col_offsets_hh_bdim; + std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level); + auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _pack_padded_sequence_generated_plumbing(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(lengths, cur_level)) { + return at::_ops::_pack_padded_sequence::call(input, lengths, batch_first); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor lengths_value; + optional lengths_bdim; + std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths, cur_level); + auto results = batch_rule(input_value, input_bdim, lengths_value, lengths_bdim, batch_first); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _pack_padded_sequence_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
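+// vmap_check_escaped below fails loudly when there is no current dynamic layer,
+// i.e. when a BatchedTensor has escaped the vmap transform that created it;
+// layerId() then names the level that unwrapTensorAtLevel and makeBatched use.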
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) { + return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first); + } + Tensor grad_value; + optional<int64_t> grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor batch_sizes_value; + optional<int64_t> batch_sizes_bdim; + std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level); + auto results = batch_rule(grad_value, grad_bdim, input_size, batch_sizes_value, batch_sizes_bdim, batch_first); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) { + return at::_ops::_pad_packed_sequence::call(data, batch_sizes, batch_first, padding_value, total_length); + } + Tensor data_value; + optional<int64_t> data_bdim; + std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level); + Tensor batch_sizes_value; + optional<int64_t> batch_sizes_bdim; + std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level); + auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, batch_first, padding_value, total_length); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor lift_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::lift::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor lift_fresh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::lift_fresh::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor lift_fresh_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::lift_fresh_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & masked_fill__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::masked_fill__Scalar::call(self, mask, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + batch_rule(self_value, self_bdim, mask_value, mask_bdim, value); + return self; +} +template +at::Tensor masked_fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::masked_fill_Scalar::call(self, mask, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & masked_fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::masked_fill__Tensor::call(self, mask, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim); + return self; +} +template +at::Tensor masked_fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && 
!isBatchedAtLevel(value, cur_level)) { + return at::_ops::masked_fill_Tensor::call(self, mask, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & masked_scatter__generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::masked_scatter_::call(self, mask, source); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim); + return self; +} +template +at::Tensor masked_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::masked_scatter::call(self, mask, source); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor masked_scatter_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = 
unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, sizes); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _masked_softmax_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, c10::optional dim, c10::optional mask_type) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_masked_softmax::call(self, mask, dim, mask_type); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, dim, mask_type); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _masked_softmax_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, mask_value, mask_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view::call(self, size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_dtype::call(self, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & put__generated_plumbing(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::put_::call(self, index, source, accumulate); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate); + return self; +} +template +at::Tensor put_generated_plumbing(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::put::call(self, index, source, accumulate); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & index_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_add_::call(self, dim, index, source, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha); + return self; +} +template +at::Tensor index_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const 
at::Tensor & source, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_add::call(self, dim, index, source, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor index_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_add_dimname::call(self, dim, index, source, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & index_reduce__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_reduce_::call(self, dim, index, source, reduce, include_self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self); + return self; +} +template +at::Tensor index_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor source_value; + optional source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & index_fill__int_Scalar_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::index_fill__int_Scalar::call(self, dim, index, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value); + return self; +} +template +at::Tensor index_fill_int_Scalar_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::index_fill_int_Scalar::call(self, dim, index, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & index_fill__int_Tensor_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::index_fill__int_Tensor::call(self, dim, index, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor 
index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim); + return self; +} +template +at::Tensor index_fill_int_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::index_fill_int_Tensor::call(self, dim, index, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & index_fill__Dimname_Scalar_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::index_fill__Dimname_Scalar::call(self, dim, index, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value); + return self; +} +template +at::Tensor & index_fill__Dimname_Tensor_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::index_fill__Dimname_Tensor::call(self, dim, index, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim); + return self; +} +template +at::Tensor index_fill_Dimname_Scalar_generated_plumbing(const at::Tensor & self, at::Dimname dim, const 
at::Tensor & index, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional<int64_t> index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor index_fill_Dimname_Tensor_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional<int64_t> index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor value_value; + optional<int64_t> value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor scatter_src_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter_src::call(self, dim, index, src); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional<int64_t> index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional<int64_t> src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & scatter__src_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src,
cur_level)) { + return at::_ops::scatter__src::call(self, dim, index, src); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional<int64_t> index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional<int64_t> src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor scatter_value_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::scatter_value::call(self, dim, index, value); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional<int64_t> index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & scatter__value_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::scatter__value::call(self, dim, index, value); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional<int64_t> index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor scatter_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter_reduce::call(self, dim, index, src, reduce); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional<int64_t> index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional<int64_t> src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & scatter__reduce_generated_plumbing(at::Tensor & self, int64_t
dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter__reduce::call(self, dim, index, src, reduce); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce); + return self; +} +template +at::Tensor scatter_value_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & scatter__value_reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::scatter__value_reduce::call(self, dim, index, value, reduce); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce); + return self; +} +template +at::Tensor scatter_dimname_src_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter_dimname_src::call(self, dim, index, src); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor scatter_dimname_value_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::scatter_dimname_value::call(self, dim, index, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor scatter_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter_add::call(self, dim, index, src); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & scatter_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter_add_::call(self, dim, index, src); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim); + return self; +} +template +at::Tensor 
scatter_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter_add_dimname::call(self, dim, index, src); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor scatter_reduce_two_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & scatter_reduce__two_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::scatter_reduce__two::call(self, dim, index, src, reduce, include_self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self); + return self; +} +template +at::Tensor & eq__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::eq__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & eq__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::eq__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bitwise_and_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_and_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_and_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_and_Scalar_Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_and_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_and_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), 
cur_level); +} +template +at::Tensor & bitwise_and__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_and__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & bitwise_and__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_and__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor __and___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__and___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor __and___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__and___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & __iand___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__iand___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & __iand___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__iand___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bitwise_or_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_or_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_or_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_or_Scalar_Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_or_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_or_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bitwise_or__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_or__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & bitwise_or__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_or__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor __or___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__or___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor __or___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__or___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & __ior___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__ior___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & __ior___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__ior___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bitwise_xor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_xor_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_xor_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_xor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_xor_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bitwise_xor__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_xor__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & bitwise_xor__Tensor_generated_plumbing(at::Tensor & self, 
const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_xor__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor __xor___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__xor___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor __xor___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__xor___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & __ixor___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__ixor___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & __ixor___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__ixor___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional 
other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor __lshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__lshift___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor __lshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__lshift___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & __ilshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__ilshift___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & __ilshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__ilshift___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bitwise_left_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_left_shift_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bitwise_left_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_left_shift__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bitwise_left_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_left_shift_Tensor_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bitwise_left_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_left_shift__Tensor_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor bitwise_left_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor __rshift___Scalar_generated_plumbing(const 
at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__rshift___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor __rshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__rshift___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & __irshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__irshift___Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & __irshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__irshift___Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bitwise_right_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_right_shift_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, 
cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bitwise_right_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_right_shift__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bitwise_right_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_right_shift_Tensor_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bitwise_right_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_right_shift__Tensor_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor bitwise_right_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & tril__generated_plumbing(at::Tensor & self, int64_t diagonal) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tril_::call(self, diagonal); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, diagonal); + return self; +} +template +at::Tensor & triu__generated_plumbing(at::Tensor & self, int64_t diagonal) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::triu_::call(self, diagonal); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, diagonal); + return self; +} +template +at::Tensor & digamma__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::digamma_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor & lerp__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) { + return at::_ops::lerp__Scalar::call(self, end, weight); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor end_value; + optional end_bdim; + std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level); + batch_rule(self_value, self_bdim, end_value, end_bdim, weight); + return self; +} +template +at::Tensor & lerp__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::lerp__Tensor::call(self, end, weight); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor end_value; + optional end_bdim; + std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim); + return self; +} +template +at::Tensor & addbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) { + return at::_ops::addbmm_::call(self, batch1, batch2, beta, alpha); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor batch1_value; + optional<int64_t> batch1_bdim; + std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level); + Tensor batch2_value; + optional<int64_t> batch2_bdim; + std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level); + batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor addbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) { + return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor batch1_value; + optional<int64_t> batch1_bdim; + std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level); + Tensor batch2_value; + optional<int64_t> batch2_bdim; + std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level); + auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & random__from_generated_plumbing(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random__from::call(self, from, to, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, from, to, generator); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & random__to_generated_plumbing(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random__to::call(self, to, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, to, generator); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & random__generated_plumbing(at::Tensor & self, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); +
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random_::call(self, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, generator); + return self; +} +template +at::Tensor & uniform__generated_plumbing(at::Tensor & self, double from, double to, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::uniform_::call(self, from, to, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, from, to, generator); + return self; +} +template +at::Tensor & cauchy__generated_plumbing(at::Tensor & self, double median, double sigma, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cauchy_::call(self, median, sigma, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, median, sigma, generator); + return self; +} +template +at::Tensor & log_normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_normal_::call(self, mean, std, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, mean, std, generator); + return self; +} +template +at::Tensor & exponential__generated_plumbing(at::Tensor & self, double lambd, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exponential_::call(self, lambd, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, lambd, generator); + return self; +} +template +at::Tensor & geometric__generated_plumbing(at::Tensor & self, double p, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::geometric_::call(self, p, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, 
self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, generator); + return self; +} +template +at::Tensor diag_generated_plumbing(const at::Tensor & self, int64_t diagonal) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::diag::call(self, diagonal); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, diagonal); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::cross::call(self, other, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor triu_generated_plumbing(const at::Tensor & self, int64_t diagonal) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::triu::call(self, diagonal); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, diagonal); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor tril_generated_plumbing(const at::Tensor & self, int64_t diagonal) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tril::call(self, diagonal); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, diagonal); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trace_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::trace::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = 
batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trace_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef sizes) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level)) { + return at::_ops::trace_backward::call(grad, sizes); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + auto results = batch_rule(grad_value, grad_bdim, sizes); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ne_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ne_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ne_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::ne_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & ne__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ne__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & ne__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::ne__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); 
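// NOTE: every *_generated_plumbing wrapper in this header follows the same
// shape: exclude DispatchKey::FuncTorchBatched so the redispatch below does
// not loop back here, verify via vmap_check_escaped that the current dynamic
// layer has not escaped, and fall through to the plain at::_ops::<op>::call(...)
// when no argument is batched at the current level. Otherwise each tensor
// argument is unwrapped into a (value, batch-dim) pair with
// unwrapTensorAtLevel, the supplied batch_rule is invoked on the unwrapped
// values, and its outputs are re-wrapped at the current level with makeBatched.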
+ Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor not_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::not_equal_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor not_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::not_equal_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & not_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::not_equal__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & not_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::not_equal__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor eq_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::eq_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor eq_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::eq_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ge_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ge_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ge_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::ge_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & ge__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ge__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & ge__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::ge__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor greater_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::greater_equal_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor greater_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::greater_equal_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & greater_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::greater_equal__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & greater_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::greater_equal__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, 
cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor le_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::le_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor le_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::le_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & le__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::le__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & le__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::le__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor less_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::less_equal_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, 
self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor less_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::less_equal_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & less_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::less_equal__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & less_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::less_equal__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor gt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gt_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor gt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, 
cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::gt_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & gt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gt__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & gt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::gt__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor greater_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::greater_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor greater_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::greater_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & greater__Scalar_generated_plumbing(at::Tensor & self, const 
at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::greater__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & greater__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::greater__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor lt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::lt_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor lt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::lt_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & lt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::lt__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & 
lt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::lt__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor less_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::less_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor less_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::less_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & less__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::less__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & less__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::less__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + 
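// The in-place variants (the "gen_vmap_inplace_plumbing" wrappers such as the
// *_  / not_equal__ / less__ overloads nearby) differ only in their tail:
// batch_rule is called for its side effect on the unwrapped self_value, and the
// original `self` reference is returned as-is, so no makeBatched re-wrapping
// is performed.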
optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor take_generated_plumbing(const at::Tensor & self, const at::Tensor & index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::take::call(self, index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, index_value, index_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor take_along_dim_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::optional dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::take_along_dim::call(self, indices, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor index_select_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::index_select::call(self, dim, index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor index_select_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::index_select_dimname::call(self, dim, index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = 
unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor index_select_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::index_select_backward::call(grad, self_sizes, dim, index); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_sizes, dim, index_value, index_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor masked_select_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::masked_select::call(self, mask); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor masked_select_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::masked_select_backward::call(grad, input, mask); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor mask_value; + optional mask_bdim; + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, mask_value, mask_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nonzero_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nonzero::call(self); + } + Tensor self_value; + optional 
self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nonzero_static_generated_plumbing(const at::Tensor & self, int64_t size, int64_t fill_value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nonzero_static::call(self, size, fill_value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, fill_value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector nonzero_numpy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nonzero_numpy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor argwhere_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::argwhere::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor gather_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::gather::call(self, dim, index, sparse_grad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor gather_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, index_value, index_bdim, sparse_grad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor gather_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) { + return at::_ops::gather_dimname::call(self, dim, index, sparse_grad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _gather_sparse_backward_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(grad, cur_level)) { + return at::_ops::_gather_sparse_backward::call(self, dim, index, grad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor index_value; + optional index_bdim; + std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level); + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, grad_value, grad_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor addcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::addcmul::call(self, tensor1, tensor2, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor 
tensor1_value; + optional tensor1_bdim; + std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level); + Tensor tensor2_value; + optional tensor2_bdim; + std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level); + auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & addcmul__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::addcmul_::call(self, tensor1, tensor2, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor tensor1_value; + optional tensor1_bdim; + std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level); + Tensor tensor2_value; + optional tensor2_bdim; + std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level); + batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value); + return self; +} +template +at::Tensor addcdiv_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::addcdiv::call(self, tensor1, tensor2, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor tensor1_value; + optional tensor1_bdim; + std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level); + Tensor tensor2_value; + optional tensor2_bdim; + std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level); + auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & addcdiv__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::addcdiv_::call(self, tensor1, tensor2, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor tensor1_value; + optional tensor1_bdim; + std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level); + Tensor tensor2_value; + 
optional tensor2_bdim; + std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level); + batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value); + return self; +} +template +at::Tensor cross_entropy_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, label_smoothing); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple triangular_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) { + return at::_ops::triangular_solve::call(self, A, upper, transpose, unitriangular); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper, transpose, unitriangular); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +void _linalg_check_errors_generated_plumbing(const at::Tensor & info, c10::string_view api_name, bool is_matrix) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(info, cur_level)) { + return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix); + } + Tensor info_value; + optional info_bdim; + std::tie(info_value, info_bdim) = unwrapTensorAtLevel(info, cur_level); + batch_rule(info_value, info_bdim, api_name, is_matrix); +} +template +at::Tensor linalg_solve_triangular_generated_plumbing(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(B, cur_level)) { + return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor B_value; + optional B_bdim; + std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level); + auto results = batch_rule(self_value, self_bdim, B_value, B_bdim, upper, left, unitriangular); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_vander_generated_plumbing(const at::Tensor & x, c10::optional N) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::linalg_vander::call(x, N); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, N); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple svd_generated_plumbing(const at::Tensor & self, bool some, bool compute_uv) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::svd::call(self, some, compute_uv); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, some, compute_uv); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor swapaxes_generated_plumbing(const at::Tensor & self, int64_t axis0, int64_t axis1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::swapaxes::call(self, axis0, axis1); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, axis0, axis1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor swapdims_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::swapdims::call(self, dim0, dim1); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
cholesky_generated_plumbing(const at::Tensor & self, bool upper) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cholesky::call(self, upper); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, upper); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cholesky_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, bool upper) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) { + return at::_ops::cholesky_solve::call(self, input2, upper); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor input2_value; + optional input2_bdim; + std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level); + auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, upper); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cholesky_solve_helper_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) { + return at::_ops::_cholesky_solve_helper::call(self, A, upper); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cholesky_inverse_generated_plumbing(const at::Tensor & self, bool upper) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cholesky_inverse::call(self, upper); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, upper); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple qr_generated_plumbing(const at::Tensor & self, bool some) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::qr::call(self, some); + } + Tensor self_value; + optional self_bdim; + 
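+  // Editorial note, not part of the generated header: unwrapTensorAtLevel peels the
+  // BatchedTensor wrapper for the current vmap level, returning the underlying tensor
+  // together with an optional batch dimension; both are forwarded to the batch rule.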
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, some); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple geqrf_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::geqrf::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor orgqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) { + return at::_ops::orgqr::call(self, input2); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor input2_value; + optional input2_bdim; + std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level); + auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ormqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(input3, cur_level)) { + return at::_ops::ormqr::call(self, input2, input3, left, transpose); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor input2_value; + optional input2_bdim; + std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level); + Tensor input3_value; + optional input3_bdim; + std::tie(input3_value, input3_bdim) = unwrapTensorAtLevel(input3, cur_level); + auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, input3_value, input3_bdim, left, transpose); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _lu_with_info_generated_plumbing(const at::Tensor & self, bool pivot, bool check_errors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_lu_with_info::call(self, pivot, check_errors); + } + Tensor self_value; + optional self_bdim; + 
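+  // Editorial note, not part of the generated header: _lu_with_info produces three
+  // outputs, so the batch rule below returns three (tensor, batch-dim) pairs and each
+  // pair is re-wrapped with makeBatched before being packed into the returned tuple.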
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, pivot, check_errors); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor lu_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) { + return at::_ops::lu_solve::call(self, LU_data, LU_pivots); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor LU_data_value; + optional LU_data_bdim; + std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level); + Tensor LU_pivots_value; + optional LU_pivots_bdim; + std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level); + auto results = batch_rule(self_value, self_bdim, LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple lu_unpack_generated_plumbing(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) { + return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots); + } + Tensor LU_data_value; + optional LU_data_bdim; + std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level); + Tensor LU_pivots_value; + optional LU_pivots_bdim; + std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level); + auto results = batch_rule(LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim, unpack_data, unpack_pivots); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor multinomial_generated_plumbing(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::multinomial::call(self, num_samples, replacement, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, num_samples, replacement, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & lgamma__generated_plumbing(at::Tensor & self) { 
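+  // Editorial note, not part of the generated header: this is the in-place flavour of
+  // the plumbing ("gen_vmap_inplace_plumbing"). The batch rule mutates self_value (the
+  // tensor wrapped inside the BatchedTensor `self`), so the wrapper returns `self`
+  // itself rather than re-wrapping a new result with makeBatched.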
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::lgamma_::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lgamma_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::lgamma::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor digamma_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::digamma::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::polygamma::call(n, self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(n, self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & polygamma__generated_plumbing(at::Tensor & self, int64_t n) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::polygamma_::call(self, n);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, n);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor erfinv_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::erfinv::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results =
batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & erfinv__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erfinv_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor i0_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::i0::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & i0__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::i0_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor sign_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sign::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sign__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sign_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor signbit_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::signbit::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor dist_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::dist::call(self, other, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & atan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::atan2_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor atan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::atan2::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor arctan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::arctan2::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
+at::Tensor & arctan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::arctan2_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor lerp_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) { + return at::_ops::lerp_Scalar::call(self, end, weight); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor end_value; + optional end_bdim; + std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor lerp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::lerp_Tensor::call(self, end, weight); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor end_value; + optional end_bdim; + std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor histc_generated_plumbing(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::histc::call(self, bins, min, max); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, bins, min, max); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple histogram_bins_tensor_generated_plumbing(const at::Tensor & 
self, const at::Tensor & bins, const c10::optional & weight, bool density) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::histogram_bins_tensor::call(self, bins, weight, density); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor bins_value; + optional bins_bdim; + std::tie(bins_value, bins_bdim) = unwrapTensorAtLevel(bins, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, bins_value, bins_bdim, weight_value, weight_bdim, density); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple histogram_bin_ct_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::vector _histogramdd_bin_edges_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _histogramdd_from_bin_cts_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _histogramdd_from_bin_tensors_generated_plumbing(const at::Tensor & self, at::TensorList bins, const c10::optional & weight, bool density) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, bins, weight_value, weight_bdim, density); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple> histogramdd_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::histogramdd::call(self, bins, range, weight, density); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple> histogramdd_int_bins_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density); + } + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple> histogramdd_TensorList_bins_generated_plumbing(const at::Tensor & self, at::TensorList bins, c10::optional> range, const c10::optional & weight, bool density) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor fmod_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fmod_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & fmod__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fmod__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor fmod_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::fmod_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, 
self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & fmod__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::fmod__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor hypot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::hypot::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & hypot__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::hypot_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor igamma_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::igamma::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & igamma__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::igamma_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor igammac_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::igammac::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & igammac__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::igammac_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor nextafter_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::nextafter::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & nextafter__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::nextafter_::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor remainder_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::remainder_Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & remainder__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::remainder__Scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor remainder_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::remainder_Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & remainder__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::remainder__Tensor::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + 
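+  // Editorial note, not part of the generated header: binary ops unwrap both operands
+  // independently; either batch dimension may be absent when only one operand is
+  // batched at this level, and aligning the batch dims is left to the batch rule.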
batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor remainder_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::remainder_Scalar_Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor min_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::min::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fmin_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::fmin::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fmax_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::fmax::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, 
other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor maximum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::maximum::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::max_other::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor minimum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::minimum::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor min_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::min_other::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, 
other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) { + return at::_ops::quantile::call(self, q, dim, keepdim, interpolation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor q_value; + optional q_bdim; + std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level); + auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nanquantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) { + return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor q_value; + optional q_bdim; + std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level); + auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nanquantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation); + return makeBatched(std::get<0>(results), 
std::get<1>(results), cur_level); +} +template +::std::tuple sort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sort::call(self, dim, descending); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, descending); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple sort_stable_generated_plumbing(const at::Tensor & self, c10::optional stable, int64_t dim, bool descending) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sort_stable::call(self, stable, dim, descending); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, stable, dim, descending); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple sort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sort_dimname::call(self, dim, descending); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, descending); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple sort_dimname_stable_generated_plumbing(const at::Tensor & self, c10::optional stable, at::Dimname dim, bool descending) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sort_dimname_stable::call(self, stable, dim, descending); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, stable, dim, descending); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor msort_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::msort::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor argsort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::argsort::call(self, dim, descending); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, descending); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor argsort_stable_generated_plumbing(const at::Tensor & self, bool stable, int64_t dim, bool descending) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::argsort_stable::call(self, stable, dim, descending); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, stable, dim, descending); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor argsort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::argsort_dimname::call(self, dim, descending); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, descending); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple topk_generated_plumbing(const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::topk::call(self, k, dim, largest, sorted); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, k, dim, largest, sorted); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor all_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::all::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor any_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::any::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor renorm_generated_plumbing(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::renorm::call(self, p, dim, maxnorm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, dim, maxnorm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & renorm__generated_plumbing(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::renorm_::call(self, p, dim, maxnorm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, dim, maxnorm); + return self; +} +template +at::Tensor unfold_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unfold::call(self, dimension, size, step); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dimension, size, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor unfold_backward_generated_plumbing(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_in, cur_level)) { + return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step); + } + Tensor grad_in_value; + optional grad_in_bdim; + std::tie(grad_in_value, grad_in_bdim) = unwrapTensorAtLevel(grad_in, cur_level); + auto results = batch_rule(grad_in_value, grad_in_bdim, input_sizes, dim, size, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pow_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::pow_Tensor_Tensor::call(self, exponent); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor exponent_value; + optional exponent_bdim; + std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level); + auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pow_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::pow_Scalar::call(self, exponent); + } + Tensor exponent_value; + optional exponent_bdim; + std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level); + auto results = batch_rule(self, exponent_value, exponent_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pow_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pow_Tensor_Scalar::call(self, exponent); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, exponent); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & pow__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pow__Scalar::call(self, exponent); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, exponent); + return self; +} +template +at::Tensor & pow__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & 
exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::pow__Tensor::call(self, exponent); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor exponent_value; + optional exponent_bdim; + std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level); + batch_rule(self_value, self_bdim, exponent_value, exponent_bdim); + return self; +} +template +at::Tensor float_power_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::float_power_Tensor_Tensor::call(self, exponent); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor exponent_value; + optional exponent_bdim; + std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level); + auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor float_power_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::float_power_Scalar::call(self, exponent); + } + Tensor exponent_value; + optional exponent_bdim; + std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level); + auto results = batch_rule(self, exponent_value, exponent_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor float_power_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::float_power_Tensor_Scalar::call(self, exponent); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, exponent); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & float_power__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::float_power__Scalar::call(self, exponent); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, exponent); + return self; +} +template +at::Tensor & float_power__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::float_power__Tensor::call(self, exponent); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor exponent_value; + optional exponent_bdim; + std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level); + batch_rule(self_value, self_bdim, exponent_value, exponent_bdim); + return self; +} +template +at::Tensor & normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::normal_::call(self, mean, std, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, mean, std, generator); + return self; +} +template +at::Tensor normal_functional_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::normal_functional::call(self, mean, std, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, mean, std, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor normal_Tensor_float_generated_plumbing(const at::Tensor & mean, double std, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(mean, cur_level)) { + return at::_ops::normal_Tensor_float::call(mean, std, generator); + } + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + auto results = batch_rule(mean_value, mean_bdim, std, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor normal_float_Tensor_generated_plumbing(double mean, const at::Tensor & std, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(std, cur_level)) { + return at::_ops::normal_float_Tensor::call(mean, std, generator); + } + Tensor std_value; + optional std_bdim; + std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level); + auto results = batch_rule(mean, std_value, std_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor normal_Tensor_Tensor_generated_plumbing(const at::Tensor & mean, const at::Tensor & std, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(std, cur_level)) { + return at::_ops::normal_Tensor_Tensor::call(mean, std, generator); + } + Tensor mean_value; + optional mean_bdim; + std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level); + Tensor std_value; + optional std_bdim; + std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level); + auto results = batch_rule(mean_value, mean_bdim, std_value, std_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor alias_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::alias::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _amp_foreach_non_finite_check_and_unscale__generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale); + } + Tensor found_inf_value; + optional found_inf_bdim; + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level); + Tensor inv_scale_value; + optional inv_scale_bdim; + std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level); + batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim); +} +template +::std::vector _foreach_add_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_add_Scalar::call(self, scalar); + } + + auto results = batch_rule(self, scalar); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void 
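+// NOTE (illustrative, not part of the generated code): every plumbing function
+// above follows the same shape: exclude DispatchKey::FuncTorchBatched, look up
+// the current dynamic layer, fall through to the plain at::_ops:: call when no
+// argument is batched at that level, otherwise unwrap each Tensor into a
+// (value, batch_dim) pair with unwrapTensorAtLevel, invoke the templated
+// batch_rule, and rewrap the results with makeBatched; in-place variants call
+// the batch rule for its side effects and return `self`. Assuming that contract,
+// a batch rule for a pointwise op could be as small as the following sketch
+// (the name is hypothetical; real rules live in the functorch batch-rule files):
+//
+//   std::tuple<at::Tensor, c10::optional<int64_t>>
+//   my_abs_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim) {
+//     // a pointwise op does not care where the batch dimension sits, so the
+//     // output keeps the input's batch_dim unchanged
+//     return std::make_tuple(at::abs(self), self_bdim);
+//   }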
_foreach_add__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_add__Scalar::call(self, scalar); + } + + batch_rule(self, scalar); +} +template +::std::vector _foreach_add_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_add_List::call(self, other, alpha); + } + + auto results = batch_rule(self, other, alpha); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_add__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_add__List::call(self, other, alpha); + } + + batch_rule(self, other, alpha); +} +template +::std::vector _foreach_add_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_add_ScalarList::call(self, scalars); + } + + auto results = batch_rule(self, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_add__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_add__ScalarList::call(self, scalars); + } + + batch_rule(self, scalars); +} +template +::std::vector _foreach_add_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_add_Tensor::call(self, other, alpha); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim, alpha); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void 
_foreach_add__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_add__Tensor::call(self, other, alpha); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self, other_value, other_bdim, alpha); +} +template +::std::vector _foreach_sub_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sub_Scalar::call(self, scalar); + } + + auto results = batch_rule(self, scalar); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sub__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sub__Scalar::call(self, scalar); + } + + batch_rule(self, scalar); +} +template +::std::vector _foreach_sub_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_sub_List::call(self, other, alpha); + } + + auto results = batch_rule(self, other, alpha); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sub__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_sub__List::call(self, other, alpha); + } + + batch_rule(self, other, alpha); +} +template +::std::vector _foreach_sub_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sub_ScalarList::call(self, scalars); + } + + auto results = batch_rule(self, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void 
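+// NOTE (illustrative, not part of the generated code): the _foreach_* plumbing
+// differs from the single-Tensor plumbing in three visible ways: at::TensorList
+// arguments are forwarded to the batch rule as-is, a lone Tensor operand (e.g.
+// `other` in _foreach_add_Tensor above) is still unwrapped into (value,
+// batch_dim), and out-of-place variants rewrap their results with
+// makeBatchedVector while the in-place `_foreach_*_` variants return void.
+// Assuming the tuple shape implied by the makeBatchedVector call, a compatible
+// batch-rule declaration (hypothetical name) would look like:
+//
+//   std::tuple<std::vector<at::Tensor>, c10::optional<int64_t>>
+//   my_foreach_add_tensor_batch_rule(at::TensorList self,
+//                                    const at::Tensor& other,
+//                                    c10::optional<int64_t> other_bdim,
+//                                    const at::Scalar& alpha);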
_foreach_sub__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sub__ScalarList::call(self, scalars); + } + + batch_rule(self, scalars); +} +template +::std::vector _foreach_mul_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_mul_Scalar::call(self, scalar); + } + + auto results = batch_rule(self, scalar); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_mul__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_mul__Scalar::call(self, scalar); + } + + batch_rule(self, scalar); +} +template +::std::vector _foreach_mul_List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_mul_List::call(self, other); + } + + auto results = batch_rule(self, other); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_mul__List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_mul__List::call(self, other); + } + + batch_rule(self, other); +} +template +::std::vector _foreach_mul_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_mul_ScalarList::call(self, scalars); + } + + auto results = batch_rule(self, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_mul__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t 
cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_mul__ScalarList::call(self, scalars); + } + + batch_rule(self, scalars); +} +template +::std::vector _foreach_mul_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_mul_Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_mul__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_mul__Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self, other_value, other_bdim); +} +template +::std::vector _foreach_div_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_div_Scalar::call(self, scalar); + } + + auto results = batch_rule(self, scalar); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_div__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_div__Scalar::call(self, scalar); + } + + batch_rule(self, scalar); +} +template +::std::vector _foreach_div_List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_div_List::call(self, other); + } + + auto results = batch_rule(self, other); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_div__List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_div__List::call(self, other); + } + + batch_rule(self, other); +} +template +::std::vector _foreach_div_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_div_ScalarList::call(self, scalars); + } + + auto results = batch_rule(self, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_div__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_div__ScalarList::call(self, scalars); + } + + batch_rule(self, scalars); +} +template +::std::vector _foreach_div_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_div_Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_div__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_div__Tensor::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self, other_value, other_bdim); +} +template +::std::vector _foreach_clamp_max_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar); + } + + auto results = batch_rule(self, scalar); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_clamp_max__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar); + } + + batch_rule(self, scalar); +} +template +::std::vector _foreach_clamp_max_List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_clamp_max_List::call(self, other); + } + + auto results = batch_rule(self, other); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_clamp_max__List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_clamp_max__List::call(self, other); + } + + batch_rule(self, other); +} +template +::std::vector _foreach_clamp_max_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars); + } + + auto results = batch_rule(self, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_clamp_max__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars); + } + + batch_rule(self, scalars); +} +template +::std::vector _foreach_clamp_min_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar); + } + + auto results = batch_rule(self, scalar); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_clamp_min__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::_foreach_clamp_min__Scalar::call(self, scalar); + } + + batch_rule(self, scalar); +} +template +::std::vector _foreach_clamp_min_List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_clamp_min_List::call(self, other); + } + + auto results = batch_rule(self, other); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_clamp_min__List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_clamp_min__List::call(self, other); + } + + batch_rule(self, other); +} +template +::std::vector _foreach_clamp_min_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars); + } + + auto results = batch_rule(self, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_clamp_min__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars); + } + + batch_rule(self, scalars); +} +template +::std::vector _foreach_maximum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_maximum_Scalar::call(self, scalar); + } + + auto results = batch_rule(self, scalar); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_maximum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_maximum__Scalar::call(self, scalar); + } + + batch_rule(self, scalar); +} +template +::std::vector _foreach_maximum_List_generated_plumbing(at::TensorList self, at::TensorList other) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_maximum_List::call(self, other); + } + + auto results = batch_rule(self, other); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_maximum__List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_maximum__List::call(self, other); + } + + batch_rule(self, other); +} +template +::std::vector _foreach_maximum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_maximum_ScalarList::call(self, scalars); + } + + auto results = batch_rule(self, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_maximum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_maximum__ScalarList::call(self, scalars); + } + + batch_rule(self, scalars); +} +template +::std::vector _foreach_minimum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_minimum_Scalar::call(self, scalar); + } + + auto results = batch_rule(self, scalar); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_minimum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_minimum__Scalar::call(self, scalar); + } + + batch_rule(self, scalar); +} +template +::std::vector _foreach_minimum_List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_minimum_List::call(self, other); + } + + auto results = batch_rule(self, other); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_minimum__List_generated_plumbing(at::TensorList self, at::TensorList other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_foreach_minimum__List::call(self, other); + } + + batch_rule(self, other); +} +template +::std::vector _foreach_minimum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_minimum_ScalarList::call(self, scalars); + } + + auto results = batch_rule(self, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_minimum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_minimum__ScalarList::call(self, scalars); + } + + batch_rule(self, scalars); +} +template +::std::vector _foreach_addcdiv_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value); + } + + auto results = batch_rule(self, tensor1, tensor2, value); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _foreach_addcdiv_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars); + } + + auto results = batch_rule(self, tensor1, tensor2, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _foreach_addcdiv_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & 
scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) { + return at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars); + } + Tensor scalars_value; + optional scalars_bdim; + std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level); + auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_addcdiv__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::_foreach_addcdiv__Scalar::call(self, tensor1, tensor2, value); + } + + batch_rule(self, tensor1, tensor2, value); +} +template +void _foreach_addcdiv__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::_foreach_addcdiv__ScalarList::call(self, tensor1, tensor2, scalars); + } + + batch_rule(self, tensor1, tensor2, scalars); +} +template +void _foreach_addcdiv__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) { + return at::_ops::_foreach_addcdiv__Tensor::call(self, tensor1, tensor2, scalars); + } + Tensor scalars_value; + optional scalars_bdim; + std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level); + batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim); +} +template +::std::vector _foreach_addcmul_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, 
value); + } + + auto results = batch_rule(self, tensor1, tensor2, value); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _foreach_addcmul_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars); + } + + auto results = batch_rule(self, tensor1, tensor2, scalars); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _foreach_addcmul_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) { + return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars); + } + Tensor scalars_value; + optional scalars_bdim; + std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level); + auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_addcmul__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value); + } + + batch_rule(self, tensor1, tensor2, value); +} +template +void _foreach_addcmul__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) { + return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars); + } + + batch_rule(self, tensor1, tensor2, scalars); +} +template +void _foreach_addcmul__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level 
= maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) { + return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars); + } + Tensor scalars_value; + optional scalars_bdim; + std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level); + batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim); +} +template +::std::vector _foreach_abs_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_abs::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_abs__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_abs_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_acos_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_acos::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_acos__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_acos_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_asin_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_asin::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_asin__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_asin_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_atan_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
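+  // maybeCurrentDynamicLayer() returns the innermost active functorch
+  // interpreter (the current vmap level), if any; its layerId() is the level
+  // that isBatchedAtLevel()/unwrapTensorAtLevel() compare against, and
+  // vmap_check_escaped() raises an error if batched tensors have escaped the
+  // vmap call that created them. (A reading of the surrounding functorch
+  // helpers, not something this generated header spells out.)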
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_atan::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_atan__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_atan_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_ceil_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_ceil::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_ceil__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_ceil_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_cos_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_cos::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_cos__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_cos_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_cosh_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_cosh::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_cosh__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::_foreach_cosh_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_erf_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_erf::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_erf__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_erf_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_erfc_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_erfc::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_erfc__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_erfc_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_exp_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_exp::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_exp__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_exp_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_expm1_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_expm1::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_expm1__generated_plumbing(at::TensorList self) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_expm1_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_floor_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_floor::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_floor__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_floor_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_frac_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_frac::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_frac__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_frac_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_lerp_List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) { + return at::_ops::_foreach_lerp_List::call(self, tensors1, weights); + } + + auto results = batch_rule(self, tensors1, weights); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_lerp__List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) { + return at::_ops::_foreach_lerp__List::call(self, tensors1, weights); + } + + 
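+  // Every *_generated_plumbing function in this header follows the same shape:
+  // exclude FuncTorchBatched from dispatch, look up the current vmap level,
+  // fall through to the plain at::_ops entry when no argument is batched at
+  // that level, and otherwise forward to the registered batch_rule.
+  // Out-of-place variants re-wrap the rule's (values, bdim) result via
+  // makeBatchedVector()/makeBatched(); in-place variants like this one simply
+  // invoke the rule for its side effects, as on the next line.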
batch_rule(self, tensors1, weights); +} +template +::std::vector _foreach_lerp_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) { + return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight); + } + + auto results = batch_rule(self, tensors1, weight); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_lerp__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) { + return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight); + } + + batch_rule(self, tensors1, weight); +} +template +::std::vector _foreach_lgamma_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_lgamma::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_lgamma__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_lgamma_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_log_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_log::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_log__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_log_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_log10_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::_foreach_log10::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_log10__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_log10_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_log1p_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_log1p::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_log1p__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_log1p_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_log2_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_log2::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_log2__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_log2_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_neg_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_neg::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_neg__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_neg_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_norm_Scalar_generated_plumbing(at::TensorList self, const 
at::Scalar & ord) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_norm_Scalar::call(self, ord); + } + + auto results = batch_rule(self, ord); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _foreach_pow_List_generated_plumbing(at::TensorList self, at::TensorList exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::_foreach_pow_List::call(self, exponent); + } + + auto results = batch_rule(self, exponent); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _foreach_pow_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_pow_Scalar::call(self, exponent); + } + + auto results = batch_rule(self, exponent); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _foreach_pow_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_pow_ScalarList::call(self, exponent); + } + + auto results = batch_rule(self, exponent); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector _foreach_pow_ScalarAndTensor_generated_plumbing(const at::Scalar & self, at::TensorList exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::_foreach_pow_ScalarAndTensor::call(self, exponent); + } + + auto results = batch_rule(self, exponent); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_pow__List_generated_plumbing(at::TensorList self, at::TensorList exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) { + return at::_ops::_foreach_pow__List::call(self, exponent); + } + + batch_rule(self, exponent); +} +template +void _foreach_pow__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & exponent) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_pow__Scalar::call(self, exponent); + } + + batch_rule(self, exponent); +} +template +void _foreach_pow__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef exponent) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_pow__ScalarList::call(self, exponent); + } + + batch_rule(self, exponent); +} +template +::std::vector _foreach_reciprocal_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_reciprocal::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_reciprocal__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_reciprocal_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_round_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_round::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_round__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_round_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sigmoid_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sigmoid::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sigmoid__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sigmoid_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sign_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sign::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sign__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sign_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sin_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sin::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sin__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sin_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sinh_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sinh::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sinh__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sinh_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sqrt_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sqrt::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), 
std::get<1>(results), cur_level); +} +template +void _foreach_sqrt__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sqrt_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_tan_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_tan::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_tan__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_tan_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_tanh_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_tanh::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_tanh__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_tanh_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_trunc_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_trunc::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_trunc__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_trunc_::call(self); + } + + batch_rule(self); +} +template +void _foreach_zero__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_zero_::call(self); + } + + batch_rule(self); +} +template +void _foreach_copy__generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::_foreach_copy_::call(self, src, non_blocking); + } + + batch_rule(self, src, non_blocking); +} +template +at::Tensor bucketize_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(boundaries, cur_level)) { + return at::_ops::bucketize_Tensor::call(self, boundaries, out_int32, right); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor boundaries_value; + optional boundaries_bdim; + std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level); + auto results = batch_rule(self_value, self_bdim, boundaries_value, boundaries_bdim, out_int32, right); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bucketize_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(boundaries, cur_level)) { + return at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right); + } + Tensor boundaries_value; + optional boundaries_bdim; + std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level); + auto results = batch_rule(self, boundaries_value, boundaries_bdim, out_int32, right); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor searchsorted_Tensor_generated_plumbing(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sorter, cur_level)) { + return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter); + } + Tensor sorted_sequence_value; + optional sorted_sequence_bdim; + std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional 
sorter_value; + optional sorter_bdim; + if (sorter) { + std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level); + } + auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self_value, self_bdim, out_int32, right, side, sorter_value, sorter_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor searchsorted_Scalar_generated_plumbing(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(sorter, cur_level)) { + return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter); + } + Tensor sorted_sequence_value; + optional sorted_sequence_bdim; + std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level); + optional sorter_value; + optional sorter_bdim; + if (sorter) { + std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level); + } + auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self, out_int32, right, side, sorter_value, sorter_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _convert_indices_from_coo_to_csr_generated_plumbing(const at::Tensor & self, int64_t size, bool out_int32) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_convert_indices_from_coo_to_csr::call(self, size, out_int32); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, out_int32); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _convert_indices_from_csr_to_coo_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level)) { + return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose); + } + Tensor crow_indices_value; + optional crow_indices_bdim; + std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level); + Tensor col_indices_value; + optional col_indices_bdim; + std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level); + auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, out_int32, transpose); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mse_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + 
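+  // A sketch of the kernel side this plumbing expects (illustrative only; the
+  // real batch rules live in functorch's non-generated BatchRules* sources and
+  // are registered there, e.g. through a macro such as VMAP_SUPPORT):
+  //
+  //   std::tuple<Tensor, optional<int64_t>> mse_loss_batch_rule(
+  //       const Tensor& self, optional<int64_t> self_bdim,
+  //       const Tensor& target, optional<int64_t> target_bdim,
+  //       int64_t reduction);
+  //
+  // The body below unwraps each argument into a (value, bdim) pair, passes
+  // them in that order, and re-wraps the first element of the result with
+  // makeBatched() at the current level.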
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
+    return at::_ops::mse_loss::call(self, target, reduction);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  Tensor target_value;
+  optional<int64_t> target_bdim;
+  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
+  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mse_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
+    return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction);
+  }
+  Tensor grad_output_value;
+  optional<int64_t> grad_output_bdim;
+  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  Tensor target_value;
+  optional<int64_t> target_bdim;
+  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
+    return at::_ops::l1_loss::call(self, target, reduction);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  Tensor target_value;
+  optional<int64_t> target_bdim;
+  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
+  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor multi_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::multi_margin_loss::call(self, target, p, margin, weight, reduction);
} + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor multi_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional & weight, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor multilabel_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::multilabel_margin_loss::call(self, target, reduction); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple multilabel_margin_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, 
cur_level)) { + return at::_ops::multilabel_margin_loss_forward::call(self, target, reduction); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor multilabel_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(is_target, cur_level)) { + return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + Tensor is_target_value; + optional is_target_bdim; + std::tie(is_target_value, is_target_bdim) = unwrapTensorAtLevel(is_target, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, is_target_value, is_target_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nll_loss_nd_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nll_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple nll_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor nll_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) { + return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + 
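+  // Optional tensor arguments such as `weight` are unwrapped only when
+  // present: the plumbing keeps an optional<Tensor> value plus an
+  // optional<int64_t> bdim and, when the argument is c10::nullopt, simply
+  // forwards the empty optionals to the batch rule.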
Tensor total_weight_value; + optional total_weight_bdim; + std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nll_loss2d_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple nll_loss2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor nll_loss2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) { + return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + Tensor total_weight_value; + optional total_weight_bdim; + std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor smooth_l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::smooth_l1_loss::call(self, target, reduction, beta); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, beta); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor smooth_l1_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::smooth_l1_loss_backward::call(grad_output, self, target, reduction, beta); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, beta); + 
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor huber_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::huber_loss::call(self, target, reduction, delta); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, delta); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor huber_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, delta); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor soft_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::soft_margin_loss::call(self, target, reduction); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor soft_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t 
cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::soft_margin_loss_backward::call(grad_output, self, target, reduction); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor target_value; + optional target_bdim; + std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor elu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::elu::call(self, alpha, scale, input_scale); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, alpha, scale, input_scale); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor elu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self_or_result, cur_level)) { + return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_or_result_value; + optional self_or_result_bdim; + std::tie(self_or_result_value, self_or_result_bdim) = unwrapTensorAtLevel(self_or_result, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, alpha, scale, input_scale, is_result, self_or_result_value, self_or_result_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & elu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::elu_::call(self, alpha, scale, input_scale); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, alpha, scale, input_scale); + return self; +} +template +at::Tensor 
glu_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::glu::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor glu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::glu_backward::call(grad_output, self, dim); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor glu_jvp_generated_plumbing(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dx, cur_level)) { + return at::_ops::glu_jvp::call(glu, x, dx, dim); + } + Tensor glu_value; + optional glu_bdim; + std::tie(glu_value, glu_bdim) = unwrapTensorAtLevel(glu, cur_level); + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor dx_value; + optional dx_bdim; + std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level); + auto results = batch_rule(glu_value, glu_bdim, x_value, x_bdim, dx_value, dx_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor glu_backward_jvp_generated_plumbing(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_x, cur_level) && !isBatchedAtLevel(grad_glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dgrad_glu, cur_level) && !isBatchedAtLevel(dx, cur_level)) { + return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim); + } + Tensor grad_x_value; + optional grad_x_bdim; + std::tie(grad_x_value, grad_x_bdim) = unwrapTensorAtLevel(grad_x, cur_level); + Tensor grad_glu_value; + optional grad_glu_bdim; + std::tie(grad_glu_value, grad_glu_bdim) = 
unwrapTensorAtLevel(grad_glu, cur_level); + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor dgrad_glu_value; + optional dgrad_glu_bdim; + std::tie(dgrad_glu_value, dgrad_glu_bdim) = unwrapTensorAtLevel(dgrad_glu, cur_level); + Tensor dx_value; + optional dx_bdim; + std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level); + auto results = batch_rule(grad_x_value, grad_x_bdim, grad_glu_value, grad_glu_bdim, x_value, x_bdim, dgrad_glu_value, dgrad_glu_bdim, dx_value, dx_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardsigmoid_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardsigmoid::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & hardsigmoid__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardsigmoid_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor hardsigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardsigmoid_backward::call(grad_output, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardtanh_generated_plumbing(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardtanh::call(self, min_val, max_val); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, min_val, max_val); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
+at::Tensor hardtanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, min_val, max_val); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & hardtanh__generated_plumbing(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardtanh_::call(self, min_val, max_val); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, min_val, max_val); + return self; +} +template +at::Tensor hardswish_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardswish::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & hardswish__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardswish_::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor hardswish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardswish_backward::call(grad_output, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + 
Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor leaky_relu_generated_plumbing(const at::Tensor & self, const at::Scalar & negative_slope) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::leaky_relu::call(self, negative_slope); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, negative_slope); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor leaky_relu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, negative_slope, self_is_result); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & leaky_relu__generated_plumbing(at::Tensor & self, const at::Scalar & negative_slope) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::leaky_relu_::call(self, negative_slope); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, negative_slope); + return self; +} +template +at::Tensor log_sigmoid_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_sigmoid::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple log_sigmoid_forward_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_sigmoid_forward::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor log_sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(buffer, cur_level)) { + return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer); + } + Tensor grad_output_value; + optional<int64_t> grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor buffer_value; + optional<int64_t> buffer_bdim; + std::tie(buffer_value, buffer_bdim) = unwrapTensorAtLevel(buffer, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, buffer_value, buffer_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor rrelu_with_noise_generated_plumbing(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) { + return at::_ops::rrelu_with_noise::call(self, noise, lower, upper, training, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor noise_value; + optional<int64_t> noise_bdim; + std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level); + auto results = batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor rrelu_with_noise_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) { + return at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result); + } + Tensor grad_output_value; + optional<int64_t> grad_output_bdim; + 
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor noise_value; + optional noise_bdim; + std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, self_is_result); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & rrelu_with_noise__generated_plumbing(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) { + return at::_ops::rrelu_with_noise_::call(self, noise, lower, upper, training, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor noise_value; + optional noise_bdim; + std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level); + batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator); + return self; +} +template +at::Tensor softplus_generated_plumbing(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::softplus::call(self, beta, threshold); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, beta, threshold); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor softplus_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::softplus_backward::call(grad_output, self, beta, threshold); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, beta, threshold); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor softshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::softshrink::call(self, lambd); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, lambd); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor softshrink_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::softshrink_backward::call(grad_output, self, lambd); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, lambd); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adaptive_avg_pool2d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mkldnn_adaptive_avg_pool2d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_adaptive_avg_pool2d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adaptive_avg_pool3d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_adaptive_avg_pool3d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _adaptive_avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple adaptive_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adaptive_max_pool2d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor adaptive_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple adaptive_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adaptive_max_pool3d::call(self, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), 
cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor adaptive_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor avg_pool3d_generated_plumbing(const at::Tensor & 
self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::avg_pool3d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple fractional_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) { + return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor random_samples_value; + optional random_samples_bdim; + std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor fractional_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple fractional_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) { + return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor random_samples_value; + optional random_samples_bdim; + std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor fractional_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
+::std::tuple max_pool2d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor max_pool2d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple max_pool3d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor max_pool3d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef 
stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_unpool2d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::max_unpool2d::call(self, indices, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_unpool3d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size, stride, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reflection_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::reflection_pad1d::call(self, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reflection_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reflection_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::reflection_pad2d::call(self, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reflection_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reflection_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::reflection_pad3d::call(self, padding); + } + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reflection_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor replication_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::replication_pad1d::call(self, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor replication_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::replication_pad1d_backward::call(grad_output, self, padding); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor replication_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::replication_pad2d::call(self, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = 
batch_rule(self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor replication_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::replication_pad2d_backward::call(grad_output, self, padding); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor replication_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::replication_pad3d::call(self, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor replication_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::replication_pad3d_backward::call(grad_output, self, padding); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _pad_circular_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_pad_circular::call(self, pad); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, pad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
_pad_enum_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_pad_enum::call(self, pad, mode, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, pad, mode, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pad_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pad::call(self, pad, mode, value); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, pad, mode, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_linear1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_bilinear2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_bilinear2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_trilinear3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_bicubic2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_bicubic2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::upsample_nearest1d_vec::call(input, output_size, 
scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_nearest_exact1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_nearest_exact2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} 
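+// NOTE: Every *_generated_plumbing wrapper in this header follows the same pattern:
+// exclude the FuncTorchBatched dispatch key, look up the current vmap dynamic layer and
+// its level, fall through to the plain at::_ops::<op>::call(...) when no tensor argument
+// is batched at that level, otherwise unwrap each tensor into a (value, bdim) pair with
+// unwrapTensorAtLevel, invoke the supplied batch_rule, and re-wrap the results with
+// makeBatched at the current level. A registration would look roughly like the sketch
+// below (my_upsample_batch_rule is a placeholder name, not defined in this header):
+//   m.impl("upsample_nearest3d.vec",
+//          upsample_nearest3d_vec_generated_plumbing<decltype(&my_upsample_batch_rule), &my_upsample_batch_rule>);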
+template +at::Tensor _upsample_nearest_exact3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, output_size, scale_factors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_linear1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_linear1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_bilinear2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_bilinear2d_backward_generated_plumbing(const at::Tensor & grad_output, 
c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_bilinear2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_bilinear2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_bicubic2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, 
scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_bicubic2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_bicubic2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_bicubic2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_trilinear3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, 
scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_d, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_trilinear3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_d, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::upsample_nearest1d::call(self, output_size, scales); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, scales); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_nearest_exact1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, scales); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales); + } + Tensor grad_output_value; + optional grad_output_bdim; + 
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_nearest_exact1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_nearest_exact2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = 
unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_nearest_exact2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_nearest_exact3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor upsample_nearest3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, 
scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _upsample_nearest_exact3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::sigmoid_backward::call(grad_output, output); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logit_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::optional eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::logit_backward::call(grad_output, self, eps); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor tanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::tanh_backward::call(grad_output, output); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slow_conv_transpose2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slow_conv_transpose3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
thnn_conv2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _slow_conv2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_slow_conv2d_forward::call(self, weight, kernel_size, bias, stride, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _slow_conv2d_backward_output_mask_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask); + } + Tensor grad_output_value; + optional grad_output_bdim; + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level); + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, 
cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim, kernel_size, stride, padding, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _conv_depthwise2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conv_depthwise3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slow_conv3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slow_conv3d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slow_conv_dilated2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
slow_conv_dilated3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor weight_value; + optional weight_bdim; + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor col2im_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size, kernel_size, dilation, padding, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor column_stack_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::column_stack::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor im2col_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, dilation, padding, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
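+// Every *_generated_plumbing wrapper in this header follows the same shape:
+// temporarily exclude the FuncTorchBatched key, look up the current vmap
+// level, fall through to at::_ops::<op>::call when nothing is batched at
+// that level, otherwise unwrap each tensor argument into a (value, bdim)
+// pair, call the injected batch_rule, and re-wrap the results with
+// makeBatched at the same level.
+//
+// Sketch of a batch rule that would satisfy the unary wrappers below (the
+// name and body are illustrative assumptions, not the rule functorch
+// actually registers):
+//
+//   std::tuple<at::Tensor, c10::optional<int64_t>>
+//   isfinite_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim) {
+//     // self carries one extra batch dimension at self_bdim (if present);
+//     // isfinite is elementwise, so the output keeps the same bdim.
+//     return std::make_tuple(at::isfinite(self), self_bdim);
+//   }
+//
+// Such a rule would be plugged in roughly as
+//   isfinite_generated_plumbing<decltype(&isfinite_batch_rule), &isfinite_batch_rule>.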
<typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor isfinite_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::isfinite::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor isinf_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::isinf::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void record_stream_generated_plumbing(at::Tensor & self, at::Stream s) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::record_stream::call(self, s);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, s);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor isposinf_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::isposinf::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor isneginf_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::isneginf::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _add_batch_dim_generated_plumbing(const at::Tensor & self, int64_t batch_dim, int64_t level) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_add_batch_dim::call(self, batch_dim, level);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, batch_dim, level);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _remove_batch_dim_generated_plumbing(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, level, batch_size, out_dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_entr_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_entr::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_ndtri_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_ndtri::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_log_ndtr_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_log_ndtr::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_expm1_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_expm1::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) =
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_exp2_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_exp2::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_psi_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_psi::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_digamma_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_digamma::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_gammaln_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_gammaln::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_erf_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_erf::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_erfc_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_erfc::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_erfcx_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_erfcx::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_erfinv_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_erfinv::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_ndtr_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_ndtr::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_xlog1py_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::special_xlog1py::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_xlog1py_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); 
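+  // In the Scalar-overload plumbings (the _self_scalar / _other_scalar
+  // variants here and below), only tensor operands are unwrapped; the
+  // at::Scalar is forwarded to batch_rule unchanged, so the rule signature
+  // has no bdim slot for that operand. An assumed matching rule (name and
+  // body illustrative only) would look roughly like:
+  //
+  //   std::tuple<at::Tensor, c10::optional<int64_t>>
+  //   special_xlog1py_self_scalar_batch_rule(
+  //       const at::Scalar& self,
+  //       const at::Tensor& other, c10::optional<int64_t> other_bdim) {
+  //     return std::make_tuple(at::special_xlog1py(self, other), other_bdim);
+  //   }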
+ auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::special_xlog1py_self_scalar::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_xlog1py_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_xlog1py_other_scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_xlogy_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::special_xlogy::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_xlogy_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::special_xlogy_self_scalar::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_xlogy_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_xlogy_other_scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), 
cur_level); +} +template +at::Tensor special_zeta_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::special_zeta::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_zeta_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::special_zeta_self_scalar::call(self, other); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_zeta_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_zeta_other_scalar::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_i0_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_i0::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_i0e_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_i0e::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_i1_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_i1::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_i1e_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_i1e::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_logit_generated_plumbing(const at::Tensor & self, c10::optional eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_logit::call(self, eps); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_polygamma_generated_plumbing(int64_t n, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_polygamma::call(n, self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(n, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_logsumexp::call(self, dim, keepdim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_expit_generated_plumbing(const at::Tensor & self) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_expit::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_sinc_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_sinc::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_round_generated_plumbing(const at::Tensor & self, int64_t decimals) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_round::call(self, decimals); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, decimals); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_log1p_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_log1p::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_log_softmax::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_gammainc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::special_gammainc::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_gammaincc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::special_gammaincc::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_multigammaln_generated_plumbing(const at::Tensor & self, int64_t p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_multigammaln::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_softmax::call(self, dim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_fft_generated_plumbing(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_fft::call(self, n, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, n, dim, norm); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ifft_generated_plumbing(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ifft::call(self, n, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, n, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_rfft_generated_plumbing(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_rfft::call(self, n, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, n, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_irfft_generated_plumbing(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_irfft::call(self, n, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, n, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_hfft_generated_plumbing(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_hfft::call(self, n, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, n, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ihfft_generated_plumbing(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ihfft::call(self, n, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = 
batch_rule(self_value, self_bdim, n, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_fft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_fft2::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ifft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ifft2::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_rfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_rfft2::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_irfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_irfft2::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_hfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::fft_hfft2::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ihfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ihfft2::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_fftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_fftn::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ifftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ifftn::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_rfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_rfftn::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_irfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_irfftn::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_hfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_hfftn::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ihfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ihfftn::call(self, s, dim, norm); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_fftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_fftshift::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ifftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ifftshift::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linalg_cholesky_ex_generated_plumbing(const at::Tensor & self, bool upper, bool check_errors) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, upper, check_errors); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor linalg_cholesky_generated_plumbing(const at::Tensor & self, bool upper) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_cholesky::call(self, upper); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, upper); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::linalg_cross::call(self, other, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linalg_lu_factor_generated_plumbing(const at::Tensor & A, bool pivot) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_lu_factor::call(A, pivot); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, pivot); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple linalg_lu_factor_ex_generated_plumbing(const at::Tensor & A, bool pivot, bool check_errors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = 
batch_rule(A_value, A_bdim, pivot, check_errors); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple linalg_lu_generated_plumbing(const at::Tensor & A, bool pivot) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_lu::call(A, pivot); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, pivot); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor linalg_lu_solve_generated_plumbing(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(LU, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) { + return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint); + } + Tensor LU_value; + optional LU_bdim; + std::tie(LU_value, LU_bdim) = unwrapTensorAtLevel(LU, cur_level); + Tensor pivots_value; + optional pivots_bdim; + std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level); + Tensor B_value; + optional B_bdim; + std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level); + auto results = batch_rule(LU_value, LU_bdim, pivots_value, pivots_bdim, B_value, B_bdim, left, adjoint); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _linalg_det_generated_plumbing(const at::Tensor & A) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::_linalg_det::call(A); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor linalg_det_generated_plumbing(const at::Tensor & A) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_det::call(A); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), 
cur_level); +} +template +at::Tensor det_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::det::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linalg_ldl_factor_ex_generated_plumbing(const at::Tensor & self, bool hermitian, bool check_errors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, hermitian, check_errors); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple linalg_ldl_factor_generated_plumbing(const at::Tensor & self, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_ldl_factor::call(self, hermitian); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, hermitian); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor linalg_ldl_solve_generated_plumbing(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(LD, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) { + return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian); + } + Tensor LD_value; + optional LD_bdim; + std::tie(LD_value, LD_bdim) = unwrapTensorAtLevel(LD, cur_level); + Tensor pivots_value; + optional pivots_bdim; + std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level); + Tensor B_value; + optional B_bdim; + std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level); + auto results = batch_rule(LD_value, LD_bdim, pivots_value, pivots_bdim, B_value, B_bdim, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linalg_lstsq_generated_plumbing(const at::Tensor & self, const at::Tensor & b, c10::optional rcond, 
c10::optional driver) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(b, cur_level)) { + return at::_ops::linalg_lstsq::call(self, b, rcond, driver); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor b_value; + optional b_bdim; + std::tie(b_value, b_bdim) = unwrapTensorAtLevel(b, cur_level); + auto results = batch_rule(self_value, self_bdim, b_value, b_bdim, rcond, driver); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +at::Tensor linalg_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::linalg_matmul::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_vecdot_generated_plumbing(const at::Tensor & x, const at::Tensor & y, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(y, cur_level)) { + return at::_ops::linalg_vecdot::call(x, y, dim); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor y_value; + optional y_bdim; + std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level); + auto results = batch_rule(x_value, x_bdim, y_value, y_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_matrix_exp_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_matrix_exp::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _linalg_slogdet_generated_plumbing(const at::Tensor & A) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::_linalg_slogdet::call(A); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple linalg_slogdet_generated_plumbing(const at::Tensor & A) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_slogdet::call(A); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple slogdet_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::slogdet::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor logdet_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logdet::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linalg_eig_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_eig::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _linalg_eigvals_generated_plumbing(const at::Tensor & 
self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_linalg_eigvals::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_eigvals_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_eigvals::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _linalg_eigh_generated_plumbing(const at::Tensor & A, c10::string_view UPLO, bool compute_v) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::_linalg_eigh::call(A, UPLO, compute_v); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, UPLO, compute_v); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple linalg_eigh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_eigh::call(self, UPLO); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, UPLO); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor linalg_eigvalsh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_eigvalsh::call(self, UPLO); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, UPLO); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_householder_product_generated_plumbing(const at::Tensor 
& input, const at::Tensor & tau) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tau, cur_level)) { + return at::_ops::linalg_householder_product::call(input, tau); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor tau_value; + optional tau_bdim; + std::tie(tau_value, tau_bdim) = unwrapTensorAtLevel(tau, cur_level); + auto results = batch_rule(input_value, input_bdim, tau_value, tau_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linalg_inv_ex_generated_plumbing(const at::Tensor & A, bool check_errors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_inv_ex::call(A, check_errors); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, check_errors); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor linalg_inv_generated_plumbing(const at::Tensor & A) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_inv::call(A); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor inverse_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::inverse::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor inner_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::inner::call(self, other); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, 
self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor outer_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) { + return at::_ops::outer::call(self, vec2); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor vec2_value; + optional vec2_bdim; + std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level); + auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ger_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) { + return at::_ops::ger::call(self, vec2); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor vec2_value; + optional vec2_bdim; + std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level); + auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_norm_generated_plumbing(const at::Tensor & self, const c10::optional & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_norm_ord_str_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_vector_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, 
c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_matrix_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_matrix_norm_str_ord_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional driver) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, full_matrices, compute_uv, driver); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, c10::optional driver) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return 
at::_ops::linalg_svd::call(A, full_matrices, driver); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, full_matrices, driver); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor linalg_svdvals_generated_plumbing(const at::Tensor & A, c10::optional driver) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_svdvals::call(A, driver); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, driver); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_cond_generated_plumbing(const at::Tensor & self, const c10::optional & p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_cond::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_cond_p_str_generated_plumbing(const at::Tensor & self, c10::string_view p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_cond_p_str::call(self, p); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_pinv_atol_rtol_tensor_generated_plumbing(const at::Tensor & self, const c10::optional & atol, const c10::optional & rtol, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) { + return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional atol_value; + optional atol_bdim; + if (atol) { + std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level); + } + optional rtol_value; + optional rtol_bdim; + if (rtol) { + std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, 
atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_pinv_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_pinv_generated_plumbing(const at::Tensor & self, double rcond, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_pinv::call(self, rcond, hermitian); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, rcond, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_pinv_rcond_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(rcond, cur_level)) { + return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor rcond_value; + optional rcond_bdim; + std::tie(rcond_value, rcond_bdim) = unwrapTensorAtLevel(rcond, cur_level); + auto results = batch_rule(self_value, self_bdim, rcond_value, rcond_bdim, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) { + return at::_ops::_linalg_solve_ex::call(A, B, left, check_errors); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + Tensor B_value; + optional B_bdim; + std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level); + auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), 
std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) { + return at::_ops::linalg_solve_ex::call(A, B, left, check_errors); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + Tensor B_value; + optional B_bdim; + std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level); + auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor linalg_solve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) { + return at::_ops::linalg_solve::call(A, B, left); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + Tensor B_value; + optional B_bdim; + std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level); + auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_tensorinv_generated_plumbing(const at::Tensor & self, int64_t ind) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_tensorinv::call(self, ind); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, ind); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_tensorsolve_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::linalg_tensorsolve::call(self, other, dims); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linalg_qr_generated_plumbing(const 
at::Tensor & A, c10::string_view mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_qr::call(A, mode); + } + Tensor A_value; + optional A_bdim; + std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, mode); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor linalg_matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_matrix_power::call(self, n); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_matrix_rank_atol_rtol_tensor_generated_plumbing(const at::Tensor & input, const c10::optional & atol, const c10::optional & rtol, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) { + return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + optional atol_value; + optional atol_bdim; + if (atol) { + std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level); + } + optional rtol_value; + optional rtol_bdim; + if (rtol) { + std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_matrix_rank_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_matrix_rank_generated_plumbing(const at::Tensor & self, double tol, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_matrix_rank::call(self, tol, hermitian); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, tol, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_matrix_rank_tol_tensor_generated_plumbing(const at::Tensor & input, const at::Tensor & tol, bool hermitian) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tol, cur_level)) { + return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor tol_value; + optional tol_bdim; + std::tie(tol_value, tol_bdim) = unwrapTensorAtLevel(tol, cur_level); + auto results = batch_rule(input_value, input_bdim, tol_value, tol_bdim, hermitian); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_multi_dot_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::linalg_multi_dot::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nested_to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nested_to_padded_tensor::call(self, padding, output_size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_serialization_subcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_test_serialization_subcmul::call(self, other, alpha); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = 
unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_parallel_materialize_generated_plumbing(const at::Tensor & self, int64_t num_parallel, bool skip_first) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_parallel_materialize::call(self, num_parallel, skip_first); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, num_parallel, skip_first); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_optional_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(values, cur_level)) { + return at::_ops::_test_optional_intlist::call(values, addends); + } + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(values_value, values_bdim, addends); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_optional_filled_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(values, cur_level)) { + return at::_ops::_test_optional_filled_intlist::call(values, addends); + } + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(values_value, values_bdim, addends); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_optional_floatlist_generated_plumbing(const at::Tensor & values, c10::optional> addends) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(values, cur_level)) { + return at::_ops::_test_optional_floatlist::call(values, addends); + } + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(values_value, values_bdim, addends); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_string_default_generated_plumbing(const at::Tensor & dummy, c10::string_view a, c10::string_view b) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(dummy, cur_level)) { + return at::_ops::_test_string_default::call(dummy, a, b); + } + Tensor dummy_value; + optional dummy_bdim; + std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level); + auto results = batch_rule(dummy_value, dummy_bdim, a, b); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_ambiguous_defaults_a_generated_plumbing(const at::Tensor & dummy, int64_t a, int64_t b) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dummy, cur_level)) { + return at::_ops::_test_ambiguous_defaults_a::call(dummy, a, b); + } + Tensor dummy_value; + optional dummy_bdim; + std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level); + auto results = batch_rule(dummy_value, dummy_bdim, a, b); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_ambiguous_defaults_b_generated_plumbing(const at::Tensor & dummy, int64_t a, c10::string_view b) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dummy, cur_level)) { + return at::_ops::_test_ambiguous_defaults_b::call(dummy, a, b); + } + Tensor dummy_value; + optional dummy_bdim; + std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level); + auto results = batch_rule(dummy_value, dummy_bdim, a, b); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_warn_in_autograd_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_warn_in_autograd::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_autograd_multiple_dispatch_fullcoverage_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_autograd_multiple_dispatch_ntonly_generated_plumbing(const at::Tensor & self, bool b) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, b); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_autograd_multiple_dispatch_view_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_autograd_multiple_dispatch_view::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_autograd_multiple_dispatch_view_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor segment_reduce_generated_plumbing(const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial); + } + Tensor data_value; + optional data_bdim; + std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level); + optional lengths_value; + optional lengths_bdim; + if (lengths) { + std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level); + } + optional indices_value; + optional indices_bdim; + if (indices) { + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices.value(), cur_level); + } + optional offsets_value; + optional offsets_bdim; + if (offsets) { + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level); + } + auto results = batch_rule(data_value, data_bdim, reduce, lengths_value, lengths_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, axis, unsafe, initial); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _segment_reduce_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & output, const 
at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial); + } + Tensor grad_value; + optional grad_bdim; + std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor data_value; + optional data_bdim; + std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level); + optional lengths_value; + optional lengths_bdim; + if (lengths) { + std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level); + } + optional offsets_value; + optional offsets_bdim; + if (offsets) { + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level); + } + auto results = batch_rule(grad_value, grad_bdim, output_value, output_bdim, data_value, data_bdim, reduce, lengths_value, lengths_bdim, offsets_value, offsets_bdim, axis, initial); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pad_sequence_generated_plumbing(at::TensorList sequences, bool batch_first, double padding_value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(sequences, cur_level)) { + return at::_ops::pad_sequence::call(sequences, batch_first, padding_value); + } + + auto results = batch_rule(sequences, batch_first, padding_value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flatten_dense_tensors_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::flatten_dense_tensors::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unflatten_dense_tensors_generated_plumbing(const at::Tensor & flat, at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(flat, cur_level) && !isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::unflatten_dense_tensors::call(flat, tensors); + } + Tensor flat_value; + optional flat_bdim; + std::tie(flat_value, flat_bdim) = unwrapTensorAtLevel(flat, cur_level); + auto results = batch_rule(flat_value, flat_bdim, tensors); + return makeBatchedVector(std::get<0>(results), 
std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_from_tensor_list_generated_plumbing(at::TensorList list, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(list, cur_level)) { + return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory); + } + + auto results = batch_rule(list, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _fw_primal_copy_generated_plumbing(const at::Tensor & self, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fw_primal_copy::call(self, level); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, level); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _make_dual_copy_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) { + return at::_ops::_make_dual_copy::call(primal, tangent, level); + } + Tensor primal_value; + optional primal_bdim; + std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level); + Tensor tangent_value; + optional tangent_bdim; + std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level); + auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_as_real_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_as_real_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_as_complex_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_as_complex_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _conj_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_conj_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _neg_view_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_neg_view_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor as_strided_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::as_strided_copy::call(self, size, stride, storage_offset); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_broadcast_to_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_broadcast_to_copy::call(self, size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor diagonal_copy_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::diagonal_copy::call(self, offset, dim1, dim2); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, 
cur_level); + auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor expand_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::expand_copy::call(self, size, implicit); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, implicit); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor permute_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::permute_copy::call(self, dims); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _reshape_alias_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_reshape_alias_copy::call(self, size, stride); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor select_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::select_copy_int::call(self, dim, index); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor detach_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::detach_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = 
batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_copy_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_copy_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_copy_Tensor::call(self, split_size, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_size, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_with_sizes_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_sizes, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_copy_dim_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_copy_dim::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = 
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor squeeze_copy_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_copy_dims::call(self, dim); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor t_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::t_copy::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor transpose_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::transpose_copy_int::call(self, dim0, dim1); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor unsqueeze_copy_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unsqueeze_copy::call(self, dim); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_indices_copy::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> 
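+// NOTE: every *_generated_plumbing wrapper in this header follows the same shape: unwrap the batched inputs at the current vmap level, invoke the supplied batch rule, and re-wrap its outputs. +// For a unary op such as _values_copy the batch rule is expected to look roughly like the following illustrative sketch (an assumption for exposition, not the exact functorch declaration): +// std::tuple<at::Tensor, c10::optional<int64_t>> values_copy_batch_rule(const at::Tensor & self, c10::optional<int64_t> self_bdim);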
+at::Tensor _values_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_values_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::indices_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor values_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::values_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor crow_indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::crow_indices_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor col_indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::col_indices_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ccol_indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ccol_indices_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor row_indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::row_indices_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unbind_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unbind_copy_int::call(self, dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void unbind_copy_int_out_generated_plumbing(const at::Tensor & self, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::unbind_copy_int_out::call(self, dim, out); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dim, out); +} +template +void split_copy_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, split_size, dim, out); +} +template +void split_with_sizes_copy_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && 
!isBatchedAtLevel(out, cur_level)) { + return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, split_sizes, dim, out); +} +template +at::Tensor view_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_copy::call(self, size); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_copy_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_copy_dtype::call(self, dtype); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor unfold_copy_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unfold_copy::call(self, dimension, size, step); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dimension, size, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor alias_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::alias_copy::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_padded_tensor::call(self, padding, output_size); + } + Tensor 
self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_softmax_with_shape_generated_plumbing(const at::Tensor & self, const at::Tensor & query) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(query, cur_level)) { + return at::_ops::_nested_tensor_softmax_with_shape::call(self, query); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + auto results = batch_rule(self_value, self_bdim, query_value, query_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _transformer_encoder_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type); + } + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + Tensor qkv_weight_value; + optional qkv_weight_bdim; + std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level); + Tensor qkv_bias_value; + optional qkv_bias_bdim; + std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level); + Tensor proj_weight_value; + optional proj_weight_bdim; + std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level); + Tensor proj_bias_value; + optional proj_bias_bdim; + std::tie(proj_bias_value, 
proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level); + Tensor norm_weight_1_value; + optional norm_weight_1_bdim; + std::tie(norm_weight_1_value, norm_weight_1_bdim) = unwrapTensorAtLevel(norm_weight_1, cur_level); + Tensor norm_bias_1_value; + optional norm_bias_1_bdim; + std::tie(norm_bias_1_value, norm_bias_1_bdim) = unwrapTensorAtLevel(norm_bias_1, cur_level); + Tensor norm_weight_2_value; + optional norm_weight_2_bdim; + std::tie(norm_weight_2_value, norm_weight_2_bdim) = unwrapTensorAtLevel(norm_weight_2, cur_level); + Tensor norm_bias_2_value; + optional norm_bias_2_bdim; + std::tie(norm_bias_2_value, norm_bias_2_bdim) = unwrapTensorAtLevel(norm_bias_2, cur_level); + Tensor ffn_weight_1_value; + optional ffn_weight_1_bdim; + std::tie(ffn_weight_1_value, ffn_weight_1_bdim) = unwrapTensorAtLevel(ffn_weight_1, cur_level); + Tensor ffn_bias_1_value; + optional ffn_bias_1_bdim; + std::tie(ffn_bias_1_value, ffn_bias_1_bdim) = unwrapTensorAtLevel(ffn_bias_1, cur_level); + Tensor ffn_weight_2_value; + optional ffn_weight_2_bdim; + std::tie(ffn_weight_2_value, ffn_weight_2_bdim) = unwrapTensorAtLevel(ffn_weight_2, cur_level); + Tensor ffn_bias_2_value; + optional ffn_bias_2_bdim; + std::tie(ffn_bias_2_value, ffn_bias_2_bdim) = unwrapTensorAtLevel(ffn_bias_2, cur_level); + optional mask_value; + optional mask_bdim; + if (mask) { + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level); + } + auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, mask_type); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _native_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask, bool need_weights, bool average_attn_weights, c10::optional mask_type) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type); + } + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + 
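+ // unwrapTensorAtLevel splits a batched tensor into its underlying value and the batch-dimension index at cur_level (nullopt when the tensor is not batched at this level); optional arguments such as mask are only unwrapped when they are present.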
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + Tensor qkv_weight_value; + optional qkv_weight_bdim; + std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level); + Tensor qkv_bias_value; + optional qkv_bias_bdim; + std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level); + Tensor proj_weight_value; + optional proj_weight_bdim; + std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level); + Tensor proj_bias_value; + optional proj_bias_bdim; + std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level); + optional mask_value; + optional mask_bdim; + if (mask) { + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, need_weights, average_attn_weights, mask_type); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask, double dropout_p, bool is_causal, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) { + return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal, scale); + } + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + optional attn_mask_value; + optional attn_mask_bdim; + if (attn_mask) { + std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, scale); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _scaled_dot_product_attention_math_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask, double dropout_p, bool is_causal, const c10::optional & dropout_mask, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) { + return 
at::_ops::_scaled_dot_product_attention_math::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale); + } + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + optional attn_mask_value; + optional attn_mask_bdim; + if (attn_mask) { + std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level); + } + optional dropout_mask_value; + optional dropout_mask_bdim; + if (dropout_mask) { + std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_flash_attention_for_cpu_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, const c10::optional & attn_mask, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) { + return at::_ops::_scaled_dot_product_flash_attention_for_cpu::call(query, key, value, dropout_p, is_causal, attn_mask, scale); + } + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + optional attn_mask_value; + optional attn_mask_bdim; + if (attn_mask) { + std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, dropout_p, is_causal, attn_mask_value, attn_mask_bdim, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) { + return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + Tensor out_value; + optional out_bdim; + std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level); + Tensor logsumexp_value; + optional logsumexp_bdim; + std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level); + Tensor cum_seq_q_value; + optional cum_seq_q_bdim; + std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level); + Tensor cum_seq_k_value; + optional cum_seq_k_bdim; + std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level); + Tensor philox_seed_value; + optional philox_seed_bdim; + std::tie(philox_seed_value, philox_seed_bdim) = unwrapTensorAtLevel(philox_seed, cur_level); + Tensor philox_offset_value; + optional philox_offset_bdim; + std::tie(philox_offset_value, philox_offset_bdim) = unwrapTensorAtLevel(philox_offset, cur_level); + auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_flash_attention_for_cpu_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const c10::optional & attn_mask, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) { + return at::_ops::_scaled_dot_product_flash_attention_for_cpu_backward::call(grad_out, query, key, value, out, 
logsumexp, dropout_p, is_causal, attn_mask, scale); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + Tensor out_value; + optional out_bdim; + std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level); + Tensor logsumexp_value; + optional logsumexp_bdim; + std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level); + optional attn_mask_value; + optional attn_mask_bdim; + if (attn_mask) { + std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, dropout_p, is_causal, attn_mask_value, attn_mask_bdim, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_efficient_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level)) { + return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale); + } + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + optional attn_bias_value; + optional attn_bias_bdim; + if (attn_bias) { + std::tie(attn_bias_value, attn_bias_bdim) = unwrapTensorAtLevel(attn_bias.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, compute_log_sumexp, dropout_p, is_causal, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor 
& logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array grad_input_mask, bool is_causal, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) { + return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale); + } + Tensor grad_out__value; + optional grad_out__bdim; + std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level); + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + Tensor attn_bias_value; + optional attn_bias_bdim; + std::tie(attn_bias_value, attn_bias_bdim) = unwrapTensorAtLevel(attn_bias, cur_level); + Tensor out_value; + optional out_bdim; + std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level); + Tensor logsumexp_value; + optional logsumexp_bdim; + std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level); + Tensor philox_seed_value; + optional philox_seed_bdim; + std::tie(philox_seed_value, philox_seed_bdim) = unwrapTensorAtLevel(philox_seed, cur_level); + Tensor philox_offset_value; + optional philox_offset_bdim; + std::tie(philox_offset_value, philox_offset_bdim) = unwrapTensorAtLevel(philox_offset, cur_level); + auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, dropout_p, grad_input_mask, is_causal, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_cudnn_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::_scaled_dot_product_cudnn_attention::call(query, key, value, dropout_p, is_causal, return_debug_mask, scale); + } + 
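+ // Fast path above: when no tensor argument is batched at the current level, the wrapper falls through to the plain at::_ops call. Otherwise the inputs are unwrapped below and each (tensor, bdim) pair returned by the batch rule is re-wrapped into the result tuple.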
Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, dropout_p, is_causal, return_debug_mask, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple _flash_attention_forward_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & cum_seq_q, const c10::optional & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) { + return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale); + } + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + optional cum_seq_q_value; + optional cum_seq_q_bdim; + if (cum_seq_q) { + std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q.value(), cur_level); + } + optional cum_seq_k_value; + optional cum_seq_k_bdim; + if (cum_seq_k) { + std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple _flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + 
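+ // The ExcludeDispatchKeyGuard above keeps FuncTorchBatched out of the dispatch key set for this scope, so work done inside the batch rule (or the fallthrough call) does not re-enter this plumbing layer recursively.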
auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) { + return at::_ops::_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } + Tensor grad_out_value; + optional grad_out_bdim; + std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level); + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + Tensor out_value; + optional out_bdim; + std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level); + Tensor logsumexp_value; + optional logsumexp_bdim; + std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level); + Tensor cum_seq_q_value; + optional cum_seq_q_bdim; + std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level); + Tensor cum_seq_k_value; + optional cum_seq_k_bdim; + std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level); + Tensor philox_seed_value; + optional philox_seed_bdim; + std::tie(philox_seed_value, philox_seed_bdim) = unwrapTensorAtLevel(philox_seed, cur_level); + Tensor philox_offset_value; + optional philox_offset_bdim; + std::tie(philox_offset_value, philox_offset_bdim) = unwrapTensorAtLevel(philox_offset, cur_level); + auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & bias, const at::Tensor & out, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional scale, c10::optional num_splits_key) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out_, 
cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(cu_seqlens_q, cur_level) && !isBatchedAtLevel(cu_seqlens_k, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) { + return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key); + } + Tensor grad_out__value; + optional grad_out__bdim; + std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level); + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + Tensor out_value; + optional out_bdim; + std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level); + Tensor logsumexp_value; + optional logsumexp_bdim; + std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level); + Tensor philox_seed_value; + optional philox_seed_bdim; + std::tie(philox_seed_value, philox_seed_bdim) = unwrapTensorAtLevel(philox_seed, cur_level); + Tensor philox_offset_value; + optional philox_offset_bdim; + std::tie(philox_offset_value, philox_offset_bdim) = unwrapTensorAtLevel(philox_offset, cur_level); + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + optional cu_seqlens_q_value; + optional cu_seqlens_q_bdim; + if (cu_seqlens_q) { + std::tie(cu_seqlens_q_value, cu_seqlens_q_bdim) = unwrapTensorAtLevel(cu_seqlens_q.value(), cur_level); + } + optional cu_seqlens_k_value; + optional cu_seqlens_k_bdim; + if (cu_seqlens_k) { + std::tie(cu_seqlens_k_value, cu_seqlens_k_bdim) = unwrapTensorAtLevel(cu_seqlens_k.value(), cur_level); + } + auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, bias_value, bias_bdim, out_value, out_bdim, cu_seqlens_q_value, cu_seqlens_q_bdim, cu_seqlens_k_value, cu_seqlens_k_bdim, max_seqlen_q, max_seqlen_k, logsumexp_value, logsumexp_bdim, dropout_p, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, custom_mask_type, bias_requires_grad, scale, num_splits_key); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +at::Tensor _triton_scaled_dot_attention_generated_plumbing(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(q, cur_level) && !isBatchedAtLevel(k, cur_level) && !isBatchedAtLevel(v, cur_level)) 
{ + return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p); + } + Tensor q_value; + optional q_bdim; + std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level); + Tensor k_value; + optional k_bdim; + std::tie(k_value, k_bdim) = unwrapTensorAtLevel(k, cur_level); + Tensor v_value; + optional v_bdim; + std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level); + auto results = batch_rule(q_value, q_bdim, k_value, k_bdim, v_value, v_bdim, dropout_p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _fill_mem_eff_dropout_mask__generated_plumbing(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fill_mem_eff_dropout_mask_::call(self, dropout_p, seed, offset); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dropout_p, seed, offset); + return self; +} +template +at::Tensor _triton_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask); + } + Tensor query_value; + optional query_bdim; + std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level); + Tensor key_value; + optional key_bdim; + std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level); + Tensor value_value; + optional value_bdim; + std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level); + Tensor qkv_weight_value; + optional qkv_weight_bdim; + std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level); + Tensor qkv_bias_value; + optional qkv_bias_bdim; + std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level); + Tensor proj_weight_value; + optional proj_weight_bdim; + std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level); + Tensor proj_bias_value; + optional proj_bias_bdim; + std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level); + optional mask_value; + optional mask_bdim; + if (mask) { + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, 
qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_airy_ai_generated_plumbing(const at::Tensor & x) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_airy_ai::call(x); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_bessel_j0_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_bessel_j0::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_bessel_j1_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_bessel_j1::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_bessel_y0_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_bessel_y0::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_bessel_y1_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_bessel_y1::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_t_generated_plumbing(const at::Tensor 
& x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_chebyshev_polynomial_t::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_chebyshev_polynomial_u::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + 
auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_chebyshev_polynomial_v::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_chebyshev_polynomial_w::call(x, n); + } + 
Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_hermite_polynomial_h_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_hermite_polynomial_h::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_hermite_polynomial_h_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_hermite_polynomial_h_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_hermite_polynomial_he_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_hermite_polynomial_he::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_hermite_polynomial_he_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_hermite_polynomial_he_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_laguerre_polynomial_l_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_laguerre_polynomial_l::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
special_laguerre_polynomial_l_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_laguerre_polynomial_l_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_legendre_polynomial_p_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_legendre_polynomial_p::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_legendre_polynomial_p_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_legendre_polynomial_p_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_legendre_polynomial_p_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_modified_bessel_i0_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_modified_bessel_i0::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_modified_bessel_i1_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_modified_bessel_i1::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_modified_bessel_k0_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_modified_bessel_k0::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_modified_bessel_k1_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_modified_bessel_k1::call(self);
+  }
+  Tensor self_value;
+  optional<int64_t> self_bdim;
+  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_scaled_modified_bessel_k0_generated_plumbing(const at::Tensor & x) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(x, cur_level)) {
+    return at::_ops::special_scaled_modified_bessel_k0::call(x);
+  }
+  Tensor x_value;
+  optional<int64_t> x_bdim;
+  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
+  auto results = batch_rule(x_value, x_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_scaled_modified_bessel_k1_generated_plumbing(const at::Tensor & x) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_scaled_modified_bessel_k1::call(x); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_t::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + 
auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n); + } + Tensor x_value; + optional x_bdim; + std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level); + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n); + } + Tensor n_value; + optional n_bdim; + std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return 
at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n);
+  }
+  Tensor x_value;
+  optional<int64_t> x_bdim;
+  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
+  auto results = batch_rule(x_value, x_bdim, n);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_shifted_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+    return at::_ops::special_shifted_chebyshev_polynomial_w::call(x, n);
+  }
+  Tensor x_value;
+  optional<int64_t> x_bdim;
+  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
+  Tensor n_value;
+  optional<int64_t> n_bdim;
+  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
+  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(n, cur_level)) {
+    return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n);
+  }
+  Tensor n_value;
+  optional<int64_t> n_bdim;
+  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
+  auto results = batch_rule(x, n_value, n_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(x, cur_level)) {
+    return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x, n);
+  }
+  Tensor x_value;
+  optional<int64_t> x_bdim;
+  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
+  auto results = batch_rule(x_value, x_bdim, n);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_spherical_bessel_j0_generated_plumbing(const at::Tensor & x) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(x, cur_level)) {
+    return at::_ops::special_spherical_bessel_j0::call(x);
+  }
+  Tensor x_value;
+  optional<int64_t> x_bdim;
+  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
+  auto results = batch_rule(x_value, x_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _foobar_generated_plumbing(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level =
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foobar::call(self, arg1, arg2, arg3); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, arg1, arg2, arg3); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _fused_adam__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_adam__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adam__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, 
grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_adamw__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adamw_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_adamw__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adamw__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional 
grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_sgd__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_sgd_::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_sgd__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_sgd__tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, 
momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _propagate_xla_data_generated_plumbing(const at::Tensor & input, const at::Tensor & output) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::_propagate_xla_data::call(input, output); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + batch_rule(input_value, input_bdim, output_value, output_bdim); +} +template +void _cudnn_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) { + return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_buf_value; + optional weight_buf_bdim; + std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor reserve_value; + optional reserve_bdim; + std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level); + Tensor out0_value; + optional out0_bdim; + std::tie(out0_value, out0_bdim) = 
unwrapTensorAtLevel(out0, cur_level); + Tensor out1_value; + optional out1_bdim; + std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level); + Tensor out2_value; + optional out2_bdim; + std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level); + optional cx_value; + optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + optional grad_output_value; + optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + optional dropout_state_value; + optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3); +} +template +at::Tensor bernoulli_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & p, c10::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) { + return at::_ops::bernoulli_Tensor::call(self, p, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor p_value; + optional p_bdim; + std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level); + auto results = batch_rule(self_value, self_bdim, p_value, p_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor embedding_renorm_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor indices_value; + optional indices_bdim; + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor resize_generated_plumbing(const 
at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::resize::call(self, size, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _resize_output_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_resize_output::call(self, size, device); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, device); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _index_put_impl_generated_plumbing(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate, bool unsafe) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor values_value; + optional values_bdim; + std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void miopen_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && 
!isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) { + return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor weight_buf_value; + optional weight_buf_bdim; + std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level); + Tensor hx_value; + optional hx_bdim; + std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level); + Tensor output_value; + optional output_bdim; + std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level); + Tensor reserve_value; + optional reserve_bdim; + std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level); + Tensor out0_value; + optional out0_bdim; + std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level); + Tensor out1_value; + optional out1_bdim; + std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level); + Tensor out2_value; + optional out2_bdim; + std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level); + optional cx_value; + optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + optional grad_output_value; + optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + optional dropout_state_value; + optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3); +} +template +::std::tuple _native_batch_norm_legit_functional_generated_plumbing(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps); + } + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor running_mean_value; + optional running_mean_bdim; + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean, cur_level); + Tensor running_var_value; + optional running_var_bdim; + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + optional bias_value; + optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void unsafe_split_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, split_size, dim, out); +} +template +void unsafe_split_with_sizes_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, split_sizes, dim, out); +} +template +at::Tensor resize_as_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, c10::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && 
!isBatchedAtLevel(the_template, cur_level)) { + return at::_ops::resize_as::call(self, the_template, memory_format); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor the_template_value; + optional the_template_bdim; + std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level); + auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor resize_as_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) { + return at::_ops::resize_as_sparse::call(self, the_template); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor the_template_value; + optional the_template_bdim; + std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level); + auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor zero_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::zero::call(self); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_resize_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_resize_and_clear_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, 
cur_level); + auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _coalesced_generated_plumbing(const at::Tensor & self, bool coalesced) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_coalesced::call(self, coalesced); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, coalesced); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor copy_sparse_to_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::copy_sparse_to_sparse::call(self, src, non_blocking); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor src_value; + optional src_bdim; + std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void quantize_per_tensor_tensors_out_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out); + } + Tensor scales_value; + optional scales_bdim; + std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level); + Tensor zero_points_value; + optional zero_points_bdim; + std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level); + batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype, out); +} +template +void dequantize_tensors_out_generated_plumbing(at::TensorList tensors, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::dequantize_tensors_out::call(tensors, out); + } + + batch_rule(tensors, out); +} +template +::std::tuple _fused_moving_avg_obs_fq_helper_functional_generated_plumbing(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & 
fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(observer_on, cur_level) && !isBatchedAtLevel(fake_quant_on, cur_level) && !isBatchedAtLevel(running_min, cur_level) && !isBatchedAtLevel(running_max, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor observer_on_value; + optional observer_on_bdim; + std::tie(observer_on_value, observer_on_bdim) = unwrapTensorAtLevel(observer_on, cur_level); + Tensor fake_quant_on_value; + optional fake_quant_on_bdim; + std::tie(fake_quant_on_value, fake_quant_on_bdim) = unwrapTensorAtLevel(fake_quant_on, cur_level); + Tensor running_min_value; + optional running_min_bdim; + std::tie(running_min_value, running_min_bdim) = unwrapTensorAtLevel(running_min, cur_level); + Tensor running_max_value; + optional running_max_bdim; + std::tie(running_max_value, running_max_bdim) = unwrapTensorAtLevel(running_max, cur_level); + Tensor scale_value; + optional scale_bdim; + std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level); + Tensor zero_point_value; + optional zero_point_bdim; + std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, observer_on_value, observer_on_bdim, fake_quant_on_value, fake_quant_on_bdim, running_min_value, running_min_bdim, running_max_value, running_max_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level)); +} +template +void lstm_mps_backward_out_generated_plumbing(const c10::optional & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level)) { + return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2); + } + Tensor z_state_value; + optional z_state_bdim; + std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level); + Tensor cell_state_fwd_value; + optional cell_state_fwd_bdim; + std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level); + Tensor input_value; + optional input_bdim; + std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level); + Tensor layersOutputs_value; + optional layersOutputs_bdim; + std::tie(layersOutputs_value, layersOutputs_bdim) = unwrapTensorAtLevel(layersOutputs, cur_level); + Tensor out0_value; + optional out0_bdim; + std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level); + optional grad_y_value; + optional grad_y_bdim; + if (grad_y) { + std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y.value(), cur_level); + } + optional grad_hy_value; + optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + optional grad_cy_value; + optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_value, out0_bdim, out1, out2); +} +template +at::Tensor set_source_Storage_generated_plumbing(const at::Tensor & self, at::Storage source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::set_source_Storage::call(self, source); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor set_source_Storage_storage_offset_generated_plumbing(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride); + } + Tensor self_value; + 
optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, storage_offset, size, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor set_source_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::set_source_Tensor::call(self, source); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor source_value; + optional<int64_t> source_bdim; + std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, source_value, source_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor set_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::set::call(self); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor random_from_generated_plumbing(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random_from::call(self, from, to, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, from, to, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor random_to_generated_plumbing(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random_to::call(self, to, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, to, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor random_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random::call(self, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor uniform_generated_plumbing(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::uniform::call(self, from, to, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, from, to, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor cauchy_generated_plumbing(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cauchy::call(self, median, sigma, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, median, sigma, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor log_normal_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_normal::call(self, mean, std, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, mean, std, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor exponential_generated_plumbing(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exponential::call(self, lambd, generator); + } + Tensor self_value; + optional<int64_t> self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, lambd, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor geometric_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); +
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::geometric::call(self, p, generator); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _histogramdd_bin_edges_out_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + optional weight_value; + optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density, out); +} +template +void _amp_foreach_non_finite_check_and_unscale_out_generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out); + } + Tensor found_inf_value; + optional found_inf_bdim; + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level); + Tensor inv_scale_value; + optional inv_scale_bdim; + std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level); + batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim, out); +} +template +::std::tuple<::std::vector,at::Tensor> _amp_foreach_non_finite_check_and_unscale_generated_plumbing(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale); + } + Tensor found_inf_value; + optional found_inf_bdim; + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level); + Tensor inv_scale_value; + optional inv_scale_bdim; + std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level); + auto results = batch_rule(self, found_inf_value, 
found_inf_bdim, inv_scale_value, inv_scale_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _amp_update_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(growth_tracker, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); + } + Tensor self_value; + optional self_bdim; + std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level); + Tensor growth_tracker_value; + optional growth_tracker_bdim; + std::tie(growth_tracker_value, growth_tracker_bdim) = unwrapTensorAtLevel(growth_tracker, cur_level); + Tensor found_inf_value; + optional found_inf_bdim; + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level); + auto results = batch_rule(self_value, self_bdim, growth_tracker_value, growth_tracker_bdim, found_inf_value, found_inf_bdim, scale_growth_factor, scale_backoff_factor, growth_interval); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +void _foreach_add_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_add_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_add_List_out::call(self, other, alpha, out); + } + + batch_rule(self, other, alpha, out); +} +template +void _foreach_add_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void 
_foreach_add_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self, other_value, other_bdim, alpha, out); +} +template +void _foreach_sub_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_sub_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out); + } + + batch_rule(self, other, alpha, out); +} +template +void _foreach_sub_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_mul_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_mul_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return 
at::_ops::_foreach_mul_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_mul_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_mul_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_mul_Tensor_out::call(self, other, out); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self, other_value, other_bdim, out); +} +template +void _foreach_div_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_div_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_div_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_div_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_div_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && 
!isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_div_Tensor_out::call(self, other, out); + } + Tensor other_value; + optional other_bdim; + std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level); + batch_rule(self, other_value, other_bdim, out); +} +template +void _foreach_clamp_max_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_clamp_max_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_max_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_clamp_max_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_clamp_min_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_clamp_min_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_min_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_clamp_min_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_maximum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_maximum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_maximum_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_maximum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_minimum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_minimum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_minimum_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_minimum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, 
cur_level)) { + return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_addcdiv_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out); + } + + batch_rule(self, tensor1, tensor2, value, out); +} +template +void _foreach_addcdiv_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out); + } + + batch_rule(self, tensor1, tensor2, scalars, out); +} +template +void _foreach_addcdiv_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out); + } + Tensor scalars_value; + optional scalars_bdim; + std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level); + batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out); +} +template +void _foreach_addcmul_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out); + } + + batch_rule(self, tensor1, tensor2, value, out); +} +template +void _foreach_addcmul_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out); + } + + batch_rule(self, tensor1, tensor2, scalars, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_addcmul_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out); + } + Tensor scalars_value; + optional<int64_t> scalars_bdim; + std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level); + batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_abs_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_abs_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_acos_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_acos_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_asin_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_asin_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_atan_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_atan_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_ceil_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_ceil_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_cos_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_cos_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_cosh_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_cosh_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_erf_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_erf_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_erfc_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_erfc_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_exp_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_exp_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_expm1_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_expm1_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_floor_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_floor_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_frac_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_frac_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_lerp_List_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out); + } + + batch_rule(self, tensors1, weights, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_lerp_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out); + } + + batch_rule(self, tensors1, weight, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_lgamma_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_lgamma_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_log_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_log_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_log10_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_log10_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_log1p_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_log1p_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_log2_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_log2_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_neg_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_neg_out::call(self, out); + } + + batch_rule(self, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_norm_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & ord, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out); + } + + batch_rule(self, ord, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_pow_List_out_generated_plumbing(at::TensorList self, at::TensorList exponent, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_pow_List_out::call(self, exponent, out); + } + + batch_rule(self, exponent, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_pow_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & exponent, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out); + } + + batch_rule(self, exponent, out); +}
+template <typename batch_rule_t, batch_rule_t batch_rule> +void _foreach_pow_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out); + } + + batch_rule(self, exponent, out); +} +template +void _foreach_reciprocal_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_reciprocal_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_round_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_round_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sigmoid_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sigmoid_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sign_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sign_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sin_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sin_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sinh_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sinh_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sqrt_out_generated_plumbing(at::TensorList self, at::TensorList out) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sqrt_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_tan_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_tan_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_tanh_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_tanh_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_trunc_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_trunc_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_zero_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_zero_out::call(self, out); + } + + batch_rule(self, out); +} +template +::std::vector _foreach_zero_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_zero::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_copy_out_generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_copy_out::call(self, src, non_blocking, out); + } + + batch_rule(self, src, non_blocking, out); 
+} +template +::std::vector _foreach_copy_generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::_foreach_copy::call(self, src, non_blocking); + } + + auto results = batch_rule(self, src, non_blocking); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _fused_adam_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, 
state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void _fused_adam_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adam_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void _fused_adamw_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, 
state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void _fused_adamw_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_adamw_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, 
weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adamw_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void _fused_sgd_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); 
+ auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_sgd_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_sgd::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +void _fused_sgd_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); 
+ if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_sgd_tensor_lr_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_sgd_tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + Tensor lr_value; + optional lr_bdim; + std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level); + optional grad_scale_value; + optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + optional found_inf_value; + optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level)); +} + +}} // namespace at::functorch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/WrapDimUtils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/WrapDimUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..142665b7c8b27b64be1ca7b3b44aaca132141c77 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/WrapDimUtils.h @@ -0,0 +1,153 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace at { + +// if dim_post_expr is 0 and wrap_scalar is true, then dim must be in the +// range [-1, 0]. This is a special case for scalar tensors and manifests in +// e.g. torch.sum(scalar_tensor, 0) Otherwise, dim should be in the range +// [-dim_post_expr, dim_post_expr-1]. +using c10::maybe_wrap_dim; + +inline int64_t maybe_wrap_dim(int64_t dim, TensorImpl* tensor) { + return maybe_wrap_dim(dim, tensor->dim()); +} + +inline int64_t maybe_wrap_dim(int64_t dim, TensorList tensors) { + if (tensors.empty()) { + // can't wrap empty TensorList; rely on underlying implementation to throw + // error if necessary. + return dim; + } + return maybe_wrap_dim(dim, tensors[0].dim()); +} + +inline int64_t maybe_wrap_dim( + int64_t dim, + const std::vector>& tensor_sizes) { + if (tensor_sizes.empty()) { + // can't wrap empty list; rely on underlying implementation to throw error + // if necessary + return dim; + } + return maybe_wrap_dim(dim, tensor_sizes[0].size()); +} + +// Given an array of dimensions `dims` of length `ndims`, this function "Wraps" +// each dim in-place for a tensor of rank `dim_post_expr`, allowing dims to be +// specified using negative indices. +// +// Additionally, if `wrap_scalar` is true then scalar tensors with rank 0, will +// allow dimensions in the range [-1, 0]. Otherwise, an IndexError is raised for +// dimensions not in the range [-dim_post_expr, dim_post_expr). +inline void maybe_wrap_dims_n( + int64_t* dims, + int64_t ndims, + int64_t dim_post_expr, + bool wrap_scalars = true) { + if (dim_post_expr <= 0) { + if (wrap_scalars) { + dim_post_expr = 1; // this will make range [-1, 0] + } else { + TORCH_CHECK_INDEX( + ndims == 0, + "Dimension specified as ", + dims[0], + " but tensor has no dimensions"); + return; + } + } + int64_t min = -dim_post_expr; + int64_t max = dim_post_expr - 1; + for (const auto i : c10::irange(ndims)) { + auto& dim = dims[i]; + if (dim < min || dim > max) { + TORCH_CHECK_INDEX( + false, + "Dimension out of range (expected to be in range of [", + min, + ", ", + max, + "], but got ", + dim, + ")"); + } + if (dim < 0) + dim += dim_post_expr; + } +} + +// Given a contiguous container of dimensions `dims`, this function "Wraps" +// each dim in-place for a tensor of rank `dim_post_expr`, allowing dims to be +// specified using negative indices. +// +// Additionally, if `wrap_scalar` is true then scalar tensors with rank 0, will +// allow dimensions in the range [-1, 0]. Otherwise, an IndexError is raised for +// dimensions not in the range [-dim_post_expr, dim_post_expr). +template +inline void maybe_wrap_dims( + Container& dims, + int64_t dim_post_expr, + bool wrap_scalars = true) { + return maybe_wrap_dims_n( + dims.data(), dims.size(), dim_post_expr, wrap_scalars); +} + +// previously, size [0] tensors were the only possible empty tensors; thus, it +// wasn't possible to cat empty tensors unless all the other tensors were +// 1-dimensional, so we allowed these tensors to be "skipped" (both for wrap +// dimension behavior and dimension size checking). We maintain this behavior +// for backwards compatibility, but only for this specific size (i.e. other +// empty sizes are not skipped). 
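A minimal, standalone sketch of how the wrapping helpers above behave, assuming an ordinary libtorch build and the three-argument c10::maybe_wrap_dim(dim, rank, wrap_scalar) overload pulled in by the using-declaration; the legacy cat helpers described in the preceding comment continue immediately after this snippet.

#include <ATen/WrapDimUtils.h>
#include <cassert>
#include <cstdint>
#include <vector>

static void wrap_dim_sketch() {
  // Rank-4 tensor: negative dims count from the end, so -1 -> 3 and -4 -> 0.
  assert(at::maybe_wrap_dim(-1, /*dim_post_expr=*/4) == 3);
  assert(at::maybe_wrap_dim(-4, /*dim_post_expr=*/4) == 0);

  // Rank-0 (scalar) tensor with wrap_scalar=true: the accepted range is
  // [-1, 0] and both endpoints map to 0.
  assert(at::maybe_wrap_dim(-1, /*dim_post_expr=*/0, /*wrap_scalar=*/true) == 0);

  // maybe_wrap_dims wraps a whole container in place for a rank-3 tensor;
  // anything outside [-3, 2] would raise an IndexError instead.
  std::vector<int64_t> dims = {-1, 0, -3};
  at::maybe_wrap_dims(dims, /*dim_post_expr=*/3);
  assert(dims[0] == 2 && dims[1] == 0 && dims[2] == 0);
}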
+template +inline int64_t _legacy_cat_wrap_dim( + int64_t dim, + const std::vector>& tensor_sizes) { + for (auto& sizes : tensor_sizes) { + if (sizes.size() == 1 && sizes[0] == 0) { + continue; + } + return maybe_wrap_dim(dim, sizes.size()); + } + return dim; +} + +inline int64_t legacy_cat_wrap_dim( + int64_t dim, + const std::vector>& tensor_sizes) { + return _legacy_cat_wrap_dim(dim, tensor_sizes); +} + +inline int64_t legacy_cat_wrap_dim_symint( + int64_t dim, + const std::vector>& tensor_sizes) { + return _legacy_cat_wrap_dim(dim, tensor_sizes); +} + +inline int64_t legacy_cat_wrap_dim( + int64_t dim, + const MaterializedITensorListRef& tensors) { + for (const Tensor& tensor : tensors) { + if (tensor.dim() == 1 && tensor.sizes()[0] == 0) { + continue; + } + return maybe_wrap_dim(dim, tensor.dim()); + } + return dim; +} + +// wrap negative dims in a vector +inline void wrap_all_dims( + std::vector& dims_to_wrap, + int64_t tensor_total_dims) { + for (const auto i : c10::irange(dims_to_wrap.size())) { + dims_to_wrap[i] = maybe_wrap_dim(dims_to_wrap[i], tensor_total_dims); + } +} + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/autocast_mode.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/autocast_mode.h new file mode 100644 index 0000000000000000000000000000000000000000..b3f2fcd511ff65ad8f78fc60759da354558858c6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/autocast_mode.h @@ -0,0 +1,647 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace at::autocast { + +TORCH_API bool is_enabled(); +TORCH_API void set_enabled(bool enabled); +TORCH_API void clear_cache(); +TORCH_API int increment_nesting(); +TORCH_API int decrement_nesting(); +TORCH_API bool is_cpu_enabled(); +TORCH_API void set_cpu_enabled(bool enabled); +TORCH_API at::ScalarType get_autocast_gpu_dtype(); +TORCH_API at::ScalarType get_autocast_cpu_dtype(); +TORCH_API void set_autocast_gpu_dtype(at::ScalarType dtype); +TORCH_API void set_autocast_cpu_dtype(at::ScalarType dtype); +TORCH_API bool is_xpu_enabled(); +TORCH_API void set_xpu_enabled(bool enabled); +TORCH_API at::ScalarType get_autocast_xpu_dtype(); +TORCH_API void set_autocast_xpu_dtype(at::ScalarType dtype); +TORCH_API bool is_ipu_enabled(); +TORCH_API void set_ipu_enabled(bool enabled); +TORCH_API at::ScalarType get_autocast_ipu_dtype(); +TORCH_API void set_autocast_ipu_dtype(at::ScalarType dtype); +TORCH_API bool is_hpu_enabled(); +TORCH_API void set_hpu_enabled(bool enabled); +TORCH_API at::ScalarType get_autocast_hpu_dtype(); +TORCH_API void set_autocast_hpu_dtype(at::ScalarType dtype); +TORCH_API bool is_xla_enabled(); +TORCH_API void set_xla_enabled(bool enabled); +TORCH_API at::ScalarType get_autocast_xla_dtype(); +TORCH_API void set_autocast_xla_dtype(at::ScalarType dtype); +TORCH_API bool is_privateuseone_enabled(); +TORCH_API void set_privateuseone_enabled(bool enabled); +TORCH_API at::ScalarType get_autocast_privateuseone_dtype(); +TORCH_API void set_autocast_privateuseone_dtype(at::ScalarType dtype); +TORCH_API bool is_autocast_cache_enabled(); +TORCH_API void set_autocast_cache_enabled(bool enabled); + +namespace { +inline bool is_autocast_eligible( + const Tensor& tensor, + c10::DeviceType device_type) { + switch (device_type) { + case c10::DeviceType::CUDA: + return (tensor.is_cuda() || tensor.is_xla()) && + tensor.is_floating_point(); + case c10::DeviceType::CPU: + return (tensor.is_cpu() || tensor.is_mkldnn()) && + 
tensor.is_floating_point(); + case c10::DeviceType::XPU: + return tensor.is_xpu() && tensor.is_floating_point(); + case c10::DeviceType::IPU: + return tensor.is_ipu() && tensor.is_floating_point(); + case c10::DeviceType::HPU: + return tensor.is_hpu() && tensor.is_floating_point(); + case c10::DeviceType::XLA: + return tensor.is_xla() && tensor.is_floating_point(); + case c10::DeviceType::PrivateUse1: + return tensor.is_privateuseone() && tensor.is_floating_point(); + default: + return false; + } +} +} // namespace + +inline DispatchKey get_autocast_dispatch_key_from_device_type( + c10::DeviceType device_type) { + switch (device_type) { + case c10::DeviceType::CUDA: + return DispatchKey::Autocast; + case c10::DeviceType::CPU: + return DispatchKey::AutocastCPU; + case c10::DeviceType::XPU: + return DispatchKey::AutocastXPU; + case c10::DeviceType::IPU: + return DispatchKey::AutocastIPU; + case c10::DeviceType::HPU: + return DispatchKey::AutocastHPU; + case c10::DeviceType::XLA: + return DispatchKey::AutocastXLA; + case c10::DeviceType::PrivateUse1: + return DispatchKey::AutocastPrivateUse1; + default: + throw std::runtime_error( + "unknown device type for autocast in get_autocast_dispatch_key_from_device_type"); + } +} + +inline at::ScalarType get_lower_precision_fp_from_device_type( + c10::DeviceType device_type) { + switch (device_type) { + case c10::DeviceType::CUDA: + return get_autocast_gpu_dtype(); + case c10::DeviceType::CPU: + return get_autocast_cpu_dtype(); + case c10::DeviceType::XPU: + return get_autocast_xpu_dtype(); + case c10::DeviceType::IPU: + return get_autocast_ipu_dtype(); + case c10::DeviceType::HPU: + return get_autocast_hpu_dtype(); + case c10::DeviceType::XLA: + return get_autocast_xla_dtype(); + case c10::DeviceType::PrivateUse1: + return get_autocast_privateuseone_dtype(); + default: + throw std::runtime_error( + "unknown device type for autocast in get_lower_precision_fp_from_device_type"); + } +} + +/******************************************************************** +Logic to extract the promote type from any Tensor or TensorList args. +********************************************************************/ + +// Overload to catch Tensor args. +// If nextArg is floating-point, compare its scalar_type with our +// current best guess for the promote type, and update if necessary. +inline at::ScalarType prioritize( + at::ScalarType current, + const Tensor& nextArg, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + if (current == at::kDouble) { + AT_ERROR("promote type is double in at::autocast::prioritize"); + return current; + } + at::ScalarType lower_precision_fp = + get_lower_precision_fp_from_device_type(device_type); + if (is_autocast_eligible(nextArg, device_type)) { + auto next = nextArg.scalar_type(); + if (next == at::kDouble) { + return current; // ignores double tensors + } else if (current == at::kFloat || next == at::kFloat) { + return at::kFloat; // prioritizes float over lower_precision_fp + } else if (current == lower_precision_fp && next == lower_precision_fp) { + return lower_precision_fp; + } else { + AT_ERROR("Unexpected floating ScalarType in at::autocast::prioritize"); + return current; + } + } else { + return current; + } +} + +// Overload to catch TensorList args (for e.g. cat, stack). +// Reuses the overload above to process each Tensor in the list. 
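A minimal, standalone sketch of the prioritize/promote_type fold used by the promote cast policy (promote_type itself is defined a few lines further down), assuming the default CPU autocast dtype of bfloat16 and an ordinary libtorch build; the tensors and shapes here are illustrative only.

#include <ATen/ATen.h>
#include <ATen/autocast_mode.h>
#include <iostream>

static void promote_type_sketch() {
  // Lower-precision dtype for CPU autocast (bfloat16 unless the user changed it).
  const auto lp = at::autocast::get_lower_precision_fp_from_device_type(
      c10::DeviceType::CPU);

  const at::Tensor a = at::ones({2, 2}, at::kBFloat16);
  const at::Tensor b = at::ones({2, 2}, at::kFloat);
  const at::Tensor c = at::ones({2, 2}, at::kDouble);

  // Mixing bf16 and fp32 inputs promotes to fp32; the fp64 tensor is ignored
  // by prioritize, so it does not widen the result further.
  const auto widest =
      at::autocast::promote_type(lp, c10::DeviceType::CPU, a, b, c);
  std::cout << widest << '\n'; // prints "Float"

  // All-bf16 inputs stay in the lower-precision type.
  const auto stays = at::autocast::promote_type(lp, c10::DeviceType::CPU, a, a);
  std::cout << stays << '\n'; // prints "BFloat16"
}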
+inline at::ScalarType prioritize( + at::ScalarType current, + const TensorList& list, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + for (const auto& tensor : list) { + current = prioritize(current, tensor, device_type); + } + return current; +} + +inline at::ScalarType prioritize( + at::ScalarType current, + const ITensorListRef& list, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + for (const auto& tensor : list) { + current = prioritize(current, tensor, device_type); + } + return current; +} + +// Template to catch non-Tensor args (no-op that returns current best guess) +template +inline at::ScalarType prioritize( + at::ScalarType current, + T nextArg, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + return current; +} + +// Overload for the tail case. +inline at::ScalarType promote_type( + at::ScalarType current, + c10::DeviceType device_type) { + return current; +} + +// Unpack args and determine if incoming lower_precision_fp tensors need to be +// promoted to float32. Non-Tensor arguments are ignored. +template +inline at::ScalarType promote_type( + at::ScalarType current, + c10::DeviceType device_type, + Arg0 arg0, + Args... args) { + auto new_current = prioritize(current, arg0, device_type); + return promote_type(new_current, device_type, args...); +} + +/**************************************************** +Logic to apply cached casting to any Tensor argument. +****************************************************/ +inline bool is_eligible( + const Tensor& arg, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + return ( + arg.defined() && is_autocast_eligible(arg, device_type) && + (arg.scalar_type() != at::kDouble)); +} + +// Overload to catch Tensor args +TORCH_API Tensor cached_cast( + at::ScalarType to_type, + const Tensor& arg, + c10::DeviceType device_type = c10::DeviceType::CUDA); + +// Overload to process optional +inline c10::optional cached_cast( + at::ScalarType to_type, + const c10::optional& arg, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + if (arg.has_value()) { + return cached_cast(to_type, *arg, device_type); + } else { + return c10::nullopt; + } +} + +// Overload to process TensorLists +inline std::vector cached_cast( + at::ScalarType to_type, + const TensorList& arg, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + std::vector vec; + vec.reserve(arg.size()); + for (const auto& t : arg) { + vec.emplace_back(cached_cast(to_type, t, device_type)); + } + return vec; +} + +inline std::vector cached_cast( + at::ScalarType to_type, + const ITensorListRef& arg, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + std::vector vec; + vec.reserve(arg.size()); + for (const auto& t : arg) { + vec.emplace_back(cached_cast(to_type, t, device_type)); + } + return vec; +} + +// Template to catch non-Tensor args. +template +inline T cached_cast( + at::ScalarType to_type, + T arg, + c10::DeviceType device_type = c10::DeviceType::CUDA) { + return arg; +} + +/******************************************************* +Logic to flip an output dtype flag. +Keep it simple for now by assuming only one such flag is +present in the argument list. If I ever need a function +with more than flag I'll figure out something else. +The policy is: +If the user has explicity specified a dtype, respect it. +Otherwise, set it to the autocast type. 
+********************************************************/ + +// Overload to catch dtype flags +c10::optional inline set_opt_dtype( + at::ScalarType to_type, + const c10::optional& dtype) { + return dtype.has_value() ? dtype : to_type; +} + +// Template to catch other args +template +inline T set_opt_dtype(at::ScalarType to_type, T arg) { + return arg; +} + +template +inline bool firstarg_is_eligible( + c10::DeviceType device_type, + const Tensor& arg, + Args... args) { + return is_eligible(arg, device_type); +} + +template +inline at::ScalarType type_from_firstarg( + c10::DeviceType device_type, + at::ScalarType to_type, + const Tensor& arg, + Args... args) { + return (is_eligible(arg, device_type) ? to_type : arg.scalar_type()); +} + +// Policies correspond to op categories that need code-divergent handling. +// Wrapper templates below are specialized based on a policy template parameter. +enum class CastPolicy : uint8_t { + lower_precision_fp = 0, // Cast all inputs to lower_precision_fp before + // running the op. Currently, lower_precision_fp is + // fp16 for AutocastCUDA, and is defined by user + // (default bf16) for AutocastCPU or other device. + fp32, // Cast all inputs to at::kFloat before running the op. + fp32_set_opt_dtype, // Treats functions (like softmax) that + // 1. we'd like to run in fp32 and + // 2. have a c10::optional arg that controls + // the output type. + // fp32_set_opt_dtype wrappers' policy is: if the output + // type is already set, don't touch it, otherwise, set + // it to at::kFloat. + fp32_append_dtype, // Treats functions (like norm) that + // 1. we'd like to run in fp32 and + // 2. have some overloads that accept an output type and + // other overloads that don't. + // fp32_append_dtype wrappers wrap the overloads that don't + // have an output dtype. + // The wrapper policy is: append at::kFloat to the args, + // and redispatch to the type-aware overload. + promote, // Run in the widest dtype among several args. +}; + +/******************************************************************************************************** +Templates to provide wrapper functions + +I'm copying the pattern used in core/boxing/impl/WrapFunctionIntoFunctor.h to +extract args and return type. (see also +https://stackoverflow.com/questions/46533698/how-to-deduce-argument-list-from-function-pointer) + +This strategy uses an exterior "WrapFunction" that extracts arguments on behalf +of (in my case several specializations of) an interior "WrapFunction_". +Interior WrapFunction_ specializations are defined for each CastPolicy. +********************************************************************************************************/ + +// Base template for WrapFunction_, which is specialized to contain a "call" +// method each CastPolicy +template < + CastPolicy policy, + c10::DeviceType device_type, + class Redispatch, + Redispatch* F, + class Ret, + class ArgList> +struct WrapFunction_ {}; + +// CastPolicy::lower_precision_fp General_DeviceType +template < + c10::DeviceType device_type, + class Redispatch, + Redispatch* F, + class Ret, + class... Args> +struct WrapFunction_< + CastPolicy::lower_precision_fp, + device_type, + Redispatch, + F, + Ret, + guts::typelist::typelist> { + static Ret call(Args... 
args) { + c10::impl::ExcludeDispatchKeyGuard no_autocast( + get_autocast_dispatch_key_from_device_type(device_type)); + return (*F)(cached_cast( + get_lower_precision_fp_from_device_type(device_type), + args, + device_type)...); + } +}; + +// CastPolicy::fp32 General_DeviceType +template < + c10::DeviceType device_type, + class Redispatch, + Redispatch* F, + class Ret, + class... Args> +struct WrapFunction_< + CastPolicy::fp32, + device_type, + Redispatch, + F, + Ret, + guts::typelist::typelist> { + static Ret call(Args... args) { + c10::impl::ExcludeDispatchKeyGuard no_autocast( + get_autocast_dispatch_key_from_device_type(device_type)); + return (*F)(cached_cast(at::kFloat, args, device_type)...); + } +}; + +// CastPolicy::fp32_set_opt_dtype General_DeviceType +template < + c10::DeviceType device_type, + class Redispatch, + Redispatch* F, + class Ret, + class... Args> +struct WrapFunction_< + CastPolicy::fp32_set_opt_dtype, + device_type, + Redispatch, + F, + Ret, + guts::typelist::typelist> { + static Ret call(Args... args) { + c10::impl::ExcludeDispatchKeyGuard no_autocast( + get_autocast_dispatch_key_from_device_type(device_type)); + if (firstarg_is_eligible(device_type, args...)) { + return (*F)(set_opt_dtype(at::kFloat, args)...); + } else { + // If ineligible, calls F with unaltered args. Does not set opt dtype, + // because setting opt dtype explicitly may interfere with internal + // implicit promotion decisions. + return (*F)(args...); + } + } +}; + +// CastPolicy::fp32_append_dtype General_DeviceType +template < + c10::DeviceType device_type, + class Redispatch, + Redispatch* F, + class Ret, + class... Args> +struct WrapFunction_< + CastPolicy::fp32_append_dtype, + device_type, + Redispatch, + F, + Ret, + guts::typelist::typelist> { + static Ret call(Args... args) { + c10::impl::ExcludeDispatchKeyGuard no_autocast( + get_autocast_dispatch_key_from_device_type(device_type)); + at::ScalarType out_type = + type_from_firstarg(device_type, at::kFloat, args...); + return (*F)(args..., out_type); + } +}; + +// CastPolicy::promote General_DeviceType +template < + c10::DeviceType device_type, + class Redispatch, + Redispatch* F, + class Ret, + class... Args> +struct WrapFunction_< + CastPolicy::promote, + device_type, + Redispatch, + F, + Ret, + guts::typelist::typelist> { + static Ret call(Args... args) { + c10::impl::ExcludeDispatchKeyGuard no_autocast( + get_autocast_dispatch_key_from_device_type(device_type)); + auto to_type = promote_type( + get_lower_precision_fp_from_device_type(device_type), + device_type, + args...); + return (*F)(cached_cast(to_type, args, device_type)...); + } +}; + +// Wrapper to infer return_type and parameter_types for WrapFunction_ (imitating +// core/boxing/impl/WrapFunctionIntoFunctor.h) +template < + CastPolicy policy, + c10::DeviceType device_type, + class Registered, // The signature for which we're registering. The + // dispatcher's calling code invokes our registered + // functions with arguments matching Registered, so we + // register WrapFunction_::call methods with a matching + // signature to properly field those arguments. + // guts::function_traits below extracts return_type and + // parameter_types from Registered, which WrapFunction_ + // templates above use to declare their call methods. + class Redispatch, // The signature for the function we're redispatching to. 
+ // In most cases this is the same as Registered, but for + // some ops (for example, ops where we append a dtype) + // it's useful to redispatch to a function with a + // different signature. + Redispatch* F> // The actual function we're redispatching to. +struct WrapFunction final { + using type = WrapFunction_< + policy, + device_type, + Redispatch, + F, + typename guts::function_traits::return_type, + typename guts::function_traits::parameter_types>; +}; + +/***************************************************************************************************************** +This section performs load-time registration for autocast wrappers. + +It's debatable at what level operations should be patched. We'd like casts to +be autograd-exposed and precede autograd history recording, so that for +lower_precision_fp ops, input tensors are saved for backward in +lower_precision_fp rather than fp32. Saving inputs in lower_precision_fp +can significantly reduce a model's memory footprint. + +Option 1 (strawman): Patch only at the level of explicit calls into +cudnn/cublas (cudnn_convolution, etc), because those are the code paths that are +guaranteed to use Tensor Cores, therefore they're the ones that will benefit +most from lower_precision_fp. Potential pitfall: convolutions (and other ops) +are wrapped in several layers of at::* calls. If one of those happens to record +autograd history, then we've lost the opportunity to save inputs in +lower_precision_fp. + +Option 2: Patch the Python-exposed surface of calls, to make 100% sure autograd +history recording can't sneak in ahead of autocast. This mirrors Apex most +closely. + +I think Option 2 is the right answer for all ops, not just convolutions. Option +2 is what I implement here. +*****************************************************************************************************************/ + +/******************************************************************************************************************** +Explicit registration for out-of-place ops + +The stuff below could be codegenned. Ed said +> you are going to have to write the function definition at some point, I +wouldn't try to get clever about it Therefore, for the moment, this is all +copy pasted in from VariableTypeEverything.cpp with appropriate substitutions. +********************************************************************************************************************/ + +} // namespace at::autocast + +#define ADD_NS(RAW_OP) at::RAW_OP + +// Common cases where registration signature matches redispatch signature +// (that's why SIGNATURE is repeated in the WrapFunction instantiation) +#define KERNEL(DISPATCHKEY, OP, POLICY) \ + m.impl( \ + TORCH_SELECTIVE_NAME("aten::" #OP), \ + &::at::autocast::WrapFunction< \ + ::at::autocast::CastPolicy::POLICY, \ + DISPATCHKEY, \ + decltype(ATEN_FN(OP)), \ + decltype(ATEN_FN(OP)), \ + &ATEN_FN(OP)>::type::call); + +#define KERNEL2(DISPATCHKEY, OP, OVERLOAD, POLICY) \ + m.impl( \ + TORCH_SELECTIVE_NAME("aten::" #OP "." #OVERLOAD), \ + &::at::autocast::WrapFunction< \ + ::at::autocast::CastPolicy::POLICY, \ + DISPATCHKEY, \ + decltype(ATEN_FN2(OP, OVERLOAD)), \ + decltype(ATEN_FN2(OP, OVERLOAD)), \ + &ATEN_FN2(OP, OVERLOAD)>::type::call); + +// Less-common but still useful case: redispatching to a function +// with a new signature (e.g. 
appending a dtype) +#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \ + DISPATCHKEY, \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) \ + m.impl( \ + TORCH_SELECTIVE_NAME("aten::" REGISTER_NAME), \ + &::at::autocast::WrapFunction< \ + ::at::autocast::CastPolicy::POLICY, \ + DISPATCHKEY, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + &REDISPATCH_FUNC>::type::call); + +// KERNEL_CPU/KERNEL_CPU2/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU +// registration for AutocastCPU +#define KERNEL_CPU(OP, POLICY) KERNEL(c10::DeviceType::CPU, OP, POLICY) + +#define KERNEL_CPU2(OP, OVERLOAD, POLICY) \ + KERNEL2(c10::DeviceType::CPU, OP, OVERLOAD, POLICY) + +#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU( \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) \ + KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \ + c10::DeviceType::CPU, \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) + +// KERNEL_CUDA/KERNEL_CUDA2/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA +// registration for AutocastCUDA +#define KERNEL_CUDA(OP, POLICY) KERNEL(c10::DeviceType::CUDA, OP, POLICY) + +#define KERNEL_CUDA2(OP, OVERLOAD, POLICY) \ + KERNEL2(c10::DeviceType::CUDA, OP, OVERLOAD, POLICY) + +#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA( \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) \ + KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \ + c10::DeviceType::CUDA, \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) + +// KERNEL_PRIVATEUSEONE/KERNEL_PRIVATEUSEONE2/ +// KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE +// registration for AutocastPrivateUse1 +#define KERNEL_PRIVATEUSEONE(OP, POLICY) \ + KERNEL(c10::DeviceType::PrivateUse1, OP, POLICY) + +#define KERNEL_PRIVATEUSEONE2(OP, OVERLOAD, POLICY) \ + KERNEL2(c10::DeviceType::PrivateUse1, OP, OVERLOAD, POLICY) + +#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE( \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) \ + KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \ + c10::DeviceType::PrivateUse1, \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/div_rtn.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/div_rtn.h new file mode 100644 index 0000000000000000000000000000000000000000..4935f49ae2726389441e4012cc15bcf3981f2e84 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/div_rtn.h @@ -0,0 +1,11 @@ +#pragma once + +// Integer division rounding to -Infinity +template +static inline T div_rtn(T x, T y) { + int q = x / y; + int r = x % y; + if ((r != 0) && ((r < 0) != (y < 0))) + --q; + return q; +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..9af826549021a0853beb83c74b6ac695728ab054 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h @@ -0,0 +1,7 @@ +#pragma once +#include +#include + +// AT_USE_JITERATOR(), controls whether we jit some elementwise kernels +#define AT_USE_JITERATOR() true +#define jiterator_stringify(...) 
std::string(#__VA_ARGS__); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2cf25c595a7ffdf209854ba78dcd1d57c74d20c7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _cudnn_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state); +TORCH_API ::std::tuple _cudnn_rnn_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5fafbcdec1593de4b5a5509005a806efbeb4fbcd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _cufft_get_plan_cache_size { + using schema = int64_t (at::DeviceIndex); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cufft_get_plan_cache_size") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cufft_get_plan_cache_size(DeviceIndex device_index) -> int") + static int64_t call(at::DeviceIndex device_index); + static int64_t redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log.h new file mode 100644 index 0000000000000000000000000000000000000000..cfa5e50ea148351730407548a030209f0eac7aa4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_log(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_log(at::TensorList self) { + return at::_ops::_foreach_log::call(self); +} + +// aten::_foreach_log_(Tensor(a!)[] self) -> () +inline void _foreach_log_(at::TensorList self) { + return at::_ops::_foreach_log_::call(self); +} + +// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_log_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log_out::call(self, out); +} +// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_log_outf(at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log_out::call(self, out); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt.h new file mode 100644 index 0000000000000000000000000000000000000000..84400e1dd4cb00304502d931c31f67358ee2e0aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_sqrt(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_sqrt(at::TensorList self) { + return at::_ops::_foreach_sqrt::call(self); +} + +// aten::_foreach_sqrt_(Tensor(a!)[] self) -> () +inline void _foreach_sqrt_(at::TensorList self) { + return at::_ops::_foreach_sqrt_::call(self); +} + +// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_sqrt_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sqrt_out::call(self, out); +} +// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_sqrt_outf(at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sqrt_out::call(self, out); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_pin_memory_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_pin_memory_native.h 
new file mode 100644 index 0000000000000000000000000000000000000000..1aa883f6682ccf181a8dbe048f25888c38782a8b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_pin_memory_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _pin_memory_out(const at::Tensor & self, c10::optional device, at::Tensor & out); +TORCH_API at::Tensor _pin_memory_cuda(const at::Tensor & self, c10::optional device=c10::nullopt); +TORCH_API at::Tensor _pin_memory_nested(const at::Tensor & self, c10::optional device=c10::nullopt); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/add_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/add_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ce96b40c3dc3ece19c59c1be2ea5ebaed063678e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/add_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_native.h new file mode 100644 index 0000000000000000000000000000000000000000..00ebd5527c4c50afda88c0257bd154414289504b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & channel_shuffle_out_symint(const at::Tensor & self, c10::SymInt groups, at::Tensor & out); +TORCH_API at::Tensor channel_shuffle(const at::Tensor & self, int64_t groups); +TORCH_API at::Tensor channel_shuffle_quantized_cpu(const at::Tensor & self, int64_t groups); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..53e8b1502f9c757594c7e02938720593df2b5870 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API cholesky_out { + using schema = at::Tensor & (const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cholesky") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, bool upper, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out); +}; + +struct TORCH_API cholesky { + using schema = at::Tensor (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cholesky") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cholesky(Tensor self, bool upper=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, bool upper); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0e8da1ff26a5339ee7b6f6436385cde4f41eaa6e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fft_rfft2 { + using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm); +}; + +struct TORCH_API fft_rfft2_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c74a5b8e0612a06d0304a8740e8e484d27bca82a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_inv_ex { + using schema = ::std::tuple (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv_ex") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)") + static ::std::tuple call(const at::Tensor & A, bool check_errors); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors); +}; + +struct TORCH_API linalg_inv_ex_inverse { + using schema = ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv_ex") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "inverse") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) 
info)") + static ::std::tuple call(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nansum.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nansum.h new file mode 100644 index 0000000000000000000000000000000000000000..39681928979d4d6c8c4389e44bd2ad38098cfbc0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nansum.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor nansum(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::nansum::call(self, dim, keepdim, dtype); +} + +// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nansum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::nansum_out::call(self, dim, keepdim, dtype, out); +} +// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nansum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::nansum_out::call(self, dim, keepdim, dtype, out); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d.h new file mode 100644 index 0000000000000000000000000000000000000000..a04cbedd321675434cf7530ae7b6bb88ec13ad68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
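As a usage sketch of the generated wrappers declared above (the tensor shapes are illustrative), the three nansum entry points follow the usual Function.h naming convention: the plain name is the functional form, the "_out" suffix takes the output tensor first, and "_outf" keeps schema order with the output tensor last.

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({3, 4});
  at::Tensor out = at::empty({}, x.options());

  at::Tensor total = at::nansum(x);              // functional form
  at::nansum_out(out, x);                        // "_out": out argument comes first
  at::nansum_outf(x, c10::nullopt, false,        // "_outf": schema order, out comes last
                  c10::nullopt, out);
  return 0;
}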
+inline at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); + } +} + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); + } +} + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
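A small sketch contrasting the two generated front-ends for this schema (the tensor shapes and convolution arguments are illustrative): the IntArrayRef overload widens its arguments with c10::fromIntArrayRefSlow, while the "_symint" overload takes c10::SymIntArrayRef directly.

#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor input  = at::rand({1, 4, 8, 8});
  at::Tensor weight = at::rand({4, 2, 3, 3});

  // Plain-int entry point: kernel_size is an at::IntArrayRef.
  at::Tensor y1 = at::slow_conv_transpose2d(input, weight, /*kernel_size=*/{3, 3});

  // SymInt entry point: same schema, taking c10::SymIntArrayRef.
  std::vector<c10::SymInt> kernel = {c10::SymInt(3), c10::SymInt(3)};
  at::Tensor y2 = at::slow_conv_transpose2d_symint(input, weight, kernel);
  return 0;
}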
+inline at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } +} + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } +} + +// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor +inline at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation)); +} +namespace symint { + template ::value>> + at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation)); + } +} + +// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor +inline at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); +} +namespace symint { + template ::value>> + at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); + } +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8a274341b5a1e1360cc19e16ce1a155cf6da73c8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1); +TORCH_API at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 
0000000000000000000000000000000000000000..15de400f78b921372783b0036cf5ba0b30f697cd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/pybind11/detail/class.h b/llmeval-env/lib/python3.10/site-packages/torch/include/pybind11/detail/class.h new file mode 100644 index 0000000000000000000000000000000000000000..0b9ea42db6f2357b085dede5b05784d2959f1cf3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/pybind11/detail/class.h @@ -0,0 +1,748 @@ +/* + pybind11/detail/class.h: Python C API implementation details for py::class_ + + Copyright (c) 2017 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "../attr.h" +#include "../options.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +#if !defined(PYPY_VERSION) +# define PYBIND11_BUILTIN_QUALNAME +# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) +#else +// In PyPy, we still set __qualname__ so that we can produce reliable function type +// signatures; in CPython this macro expands to nothing: +# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) \ + setattr((PyObject *) obj, "__qualname__", nameobj) +#endif + +inline std::string get_fully_qualified_tp_name(PyTypeObject *type) { +#if !defined(PYPY_VERSION) + return type->tp_name; +#else + auto module_name = handle((PyObject *) type).attr("__module__").cast(); + if (module_name == PYBIND11_BUILTINS_MODULE) + return type->tp_name; + else + return std::move(module_name) + "." + type->tp_name; +#endif +} + +inline PyTypeObject *type_incref(PyTypeObject *type) { + Py_INCREF(type); + return type; +} + +#if !defined(PYPY_VERSION) + +/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance. +extern "C" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) { + return PyProperty_Type.tp_descr_get(self, cls, cls); +} + +/// `pybind11_static_property.__set__()`: Just like the above `__get__()`. +extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) { + PyObject *cls = PyType_Check(obj) ? 
obj : (PyObject *) Py_TYPE(obj); + return PyProperty_Type.tp_descr_set(self, cls, value); +} + +// Forward declaration to use in `make_static_property_type()` +inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type); + +/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()` + methods are modified to always use the object type instead of a concrete instance. + Return value: New reference. */ +inline PyTypeObject *make_static_property_type() { + constexpr auto *name = "pybind11_static_property"; + auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); + + /* Danger zone: from now (and until PyType_Ready), make sure to + issue no Python C API calls which could potentially invoke the + garbage collector (the GC will call type_traverse(), which will in + turn find the newly constructed type in an invalid state) */ + auto *heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); + if (!heap_type) { + pybind11_fail("make_static_property_type(): error allocating type!"); + } + + heap_type->ht_name = name_obj.inc_ref().ptr(); +# ifdef PYBIND11_BUILTIN_QUALNAME + heap_type->ht_qualname = name_obj.inc_ref().ptr(); +# endif + + auto *type = &heap_type->ht_type; + type->tp_name = name; + type->tp_base = type_incref(&PyProperty_Type); + type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + type->tp_descr_get = pybind11_static_get; + type->tp_descr_set = pybind11_static_set; + +# if PY_VERSION_HEX >= 0x030C0000 + // Since Python-3.12 property-derived types are required to + // have dynamic attributes (to set `__doc__`) + enable_dynamic_attributes(heap_type); +# endif + + if (PyType_Ready(type) < 0) { + pybind11_fail("make_static_property_type(): failure in PyType_Ready()!"); + } + + setattr((PyObject *) type, "__module__", str("pybind11_builtins")); + PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); + + return type; +} + +#else // PYPY + +/** PyPy has some issues with the above C API, so we evaluate Python code instead. + This function will only be called once so performance isn't really a concern. + Return value: New reference. */ +inline PyTypeObject *make_static_property_type() { + auto d = dict(); + PyObject *result = PyRun_String(R"(\ +class pybind11_static_property(property): + def __get__(self, obj, cls): + return property.__get__(self, cls, cls) + + def __set__(self, obj, value): + cls = obj if isinstance(obj, type) else type(obj) + property.__set__(self, cls, value) +)", + Py_file_input, + d.ptr(), + d.ptr()); + if (result == nullptr) + throw error_already_set(); + Py_DECREF(result); + return (PyTypeObject *) d["pybind11_static_property"].cast().release().ptr(); +} + +#endif // PYPY + +/** Types with static properties need to handle `Type.static_prop = x` in a specific way. + By default, Python replaces the `static_property` itself, but for wrapped C++ types + we need to call `static_property.__set__()` in order to propagate the new value to + the underlying C++ data structure. */ +extern "C" inline int pybind11_meta_setattro(PyObject *obj, PyObject *name, PyObject *value) { + // Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw + // descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`). + PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name); + + // The following assignment combinations are possible: + // 1. `Type.static_prop = value` --> descr_set: `Type.static_prop.__set__(value)` + // 2. 
`Type.static_prop = other_static_prop` --> setattro: replace existing `static_prop` + // 3. `Type.regular_attribute = value` --> setattro: regular attribute assignment + auto *const static_prop = (PyObject *) get_internals().static_property_type; + const auto call_descr_set = (descr != nullptr) && (value != nullptr) + && (PyObject_IsInstance(descr, static_prop) != 0) + && (PyObject_IsInstance(value, static_prop) == 0); + if (call_descr_set) { + // Call `static_property.__set__()` instead of replacing the `static_property`. +#if !defined(PYPY_VERSION) + return Py_TYPE(descr)->tp_descr_set(descr, obj, value); +#else + if (PyObject *result = PyObject_CallMethod(descr, "__set__", "OO", obj, value)) { + Py_DECREF(result); + return 0; + } else { + return -1; + } +#endif + } else { + // Replace existing attribute. + return PyType_Type.tp_setattro(obj, name, value); + } +} + +/** + * Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing + * methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function, + * when called on a class, or a PyMethod, when called on an instance. Override that behaviour here + * to do a special case bypass for PyInstanceMethod_Types. + */ +extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) { + PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name); + if (descr && PyInstanceMethod_Check(descr)) { + Py_INCREF(descr); + return descr; + } + return PyType_Type.tp_getattro(obj, name); +} + +/// metaclass `__call__` function that is used to create all pybind11 objects. +extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) { + + // use the default metaclass call to create/initialize the object + PyObject *self = PyType_Type.tp_call(type, args, kwargs); + if (self == nullptr) { + return nullptr; + } + + // Ensure that the base __init__ function(s) were called + values_and_holders vhs(self); + for (const auto &vh : vhs) { + if (!vh.holder_constructed() && !vhs.is_redundant_value_and_holder(vh)) { + PyErr_Format(PyExc_TypeError, + "%.200s.__init__() must be called when overriding __init__", + get_fully_qualified_tp_name(vh.type->type).c_str()); + Py_DECREF(self); + return nullptr; + } + } + + return self; +} + +/// Cleanup the type-info for a pybind11-registered type. 
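A usage sketch of case 1 from the user's side (the module and struct names are illustrative, not part of pybind11): binding a static data member produces a pybind11_static_property, and assignments to it on the class are routed through __set__ by the metaclass hook above.

#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Settings {
  static int verbosity;
};
int Settings::verbosity = 0;

PYBIND11_MODULE(example, m) {
  py::class_<Settings>(m, "Settings")
      .def_readwrite_static("verbosity", &Settings::verbosity);
  // In Python, `Settings.verbosity = 3` hits pybind11_meta_setattro, which calls
  // static_property.__set__(), so the C++ Settings::verbosity is updated instead of
  // the descriptor being replaced on the class.
}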
+extern "C" inline void pybind11_meta_dealloc(PyObject *obj) { + auto *type = (PyTypeObject *) obj; + auto &internals = get_internals(); + + // A pybind11-registered type will: + // 1) be found in internals.registered_types_py + // 2) have exactly one associated `detail::type_info` + auto found_type = internals.registered_types_py.find(type); + if (found_type != internals.registered_types_py.end() && found_type->second.size() == 1 + && found_type->second[0]->type == type) { + + auto *tinfo = found_type->second[0]; + auto tindex = std::type_index(*tinfo->cpptype); + internals.direct_conversions.erase(tindex); + + if (tinfo->module_local) { + get_local_internals().registered_types_cpp.erase(tindex); + } else { + internals.registered_types_cpp.erase(tindex); + } + internals.registered_types_py.erase(tinfo->type); + + // Actually just `std::erase_if`, but that's only available in C++20 + auto &cache = internals.inactive_override_cache; + for (auto it = cache.begin(), last = cache.end(); it != last;) { + if (it->first == (PyObject *) tinfo->type) { + it = cache.erase(it); + } else { + ++it; + } + } + + delete tinfo; + } + + PyType_Type.tp_dealloc(obj); +} + +/** This metaclass is assigned by default to all pybind11 types and is required in order + for static properties to function correctly. Users may override this using `py::metaclass`. + Return value: New reference. */ +inline PyTypeObject *make_default_metaclass() { + constexpr auto *name = "pybind11_type"; + auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); + + /* Danger zone: from now (and until PyType_Ready), make sure to + issue no Python C API calls which could potentially invoke the + garbage collector (the GC will call type_traverse(), which will in + turn find the newly constructed type in an invalid state) */ + auto *heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); + if (!heap_type) { + pybind11_fail("make_default_metaclass(): error allocating metaclass!"); + } + + heap_type->ht_name = name_obj.inc_ref().ptr(); +#ifdef PYBIND11_BUILTIN_QUALNAME + heap_type->ht_qualname = name_obj.inc_ref().ptr(); +#endif + + auto *type = &heap_type->ht_type; + type->tp_name = name; + type->tp_base = type_incref(&PyType_Type); + type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + + type->tp_call = pybind11_meta_call; + + type->tp_setattro = pybind11_meta_setattro; + type->tp_getattro = pybind11_meta_getattro; + + type->tp_dealloc = pybind11_meta_dealloc; + + if (PyType_Ready(type) < 0) { + pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!"); + } + + setattr((PyObject *) type, "__module__", str("pybind11_builtins")); + PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); + + return type; +} + +/// For multiple inheritance types we need to recursively register/deregister base pointers for any +/// base classes with pointers that are difference from the instance value pointer so that we can +/// correctly recognize an offset base class pointer. This calls a function with any offset base +/// ptrs. 
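To motivate the traversal below, here is a sketch of the multiple-inheritance situation it handles (module and type names are illustrative):

#include <pybind11/pybind11.h>
namespace py = pybind11;

struct A { int a = 1; };
struct B { int b = 2; };
struct C : A, B {};  // the B subobject sits at a non-zero offset inside C

PYBIND11_MODULE(example, m) {
  py::class_<A>(m, "A");
  py::class_<B>(m, "B");
  py::class_<C, A, B>(m, "C").def(py::init<>());
  // register_instance() records not just the C* value pointer but also the offset
  // A*/B* base pointers (via traverse_offset_bases), so a C++ function returning a
  // B* that points into an existing C maps back to the same Python object.
}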
+inline void traverse_offset_bases(void *valueptr, + const detail::type_info *tinfo, + instance *self, + bool (*f)(void * /*parentptr*/, instance * /*self*/)) { + for (handle h : reinterpret_borrow(tinfo->type->tp_bases)) { + if (auto *parent_tinfo = get_type_info((PyTypeObject *) h.ptr())) { + for (auto &c : parent_tinfo->implicit_casts) { + if (c.first == tinfo->cpptype) { + auto *parentptr = c.second(valueptr); + if (parentptr != valueptr) { + f(parentptr, self); + } + traverse_offset_bases(parentptr, parent_tinfo, self, f); + break; + } + } + } + } +} + +inline bool register_instance_impl(void *ptr, instance *self) { + get_internals().registered_instances.emplace(ptr, self); + return true; // unused, but gives the same signature as the deregister func +} +inline bool deregister_instance_impl(void *ptr, instance *self) { + auto ®istered_instances = get_internals().registered_instances; + auto range = registered_instances.equal_range(ptr); + for (auto it = range.first; it != range.second; ++it) { + if (self == it->second) { + registered_instances.erase(it); + return true; + } + } + return false; +} + +inline void register_instance(instance *self, void *valptr, const type_info *tinfo) { + register_instance_impl(valptr, self); + if (!tinfo->simple_ancestors) { + traverse_offset_bases(valptr, tinfo, self, register_instance_impl); + } +} + +inline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) { + bool ret = deregister_instance_impl(valptr, self); + if (!tinfo->simple_ancestors) { + traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl); + } + return ret; +} + +/// Instance creation function for all pybind11 types. It allocates the internal instance layout +/// for holding C++ objects and holders. Allocation is done lazily (the first time the instance is +/// cast to a reference or pointer), and initialization is done by an `__init__` function. +inline PyObject *make_new_instance(PyTypeObject *type) { +#if defined(PYPY_VERSION) + // PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first + // inherited object is a plain Python type (i.e. not derived from an extension type). Fix it. + ssize_t instance_size = static_cast(sizeof(instance)); + if (type->tp_basicsize < instance_size) { + type->tp_basicsize = instance_size; + } +#endif + PyObject *self = type->tp_alloc(type, 0); + auto *inst = reinterpret_cast(self); + // Allocate the value/holder internals: + inst->allocate_layout(); + + return self; +} + +/// Instance creation function for all pybind11 types. It only allocates space for the +/// C++ object, but doesn't call the constructor -- an `__init__` function must do that. +extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) { + return make_new_instance(type); +} + +/// An `__init__` function constructs the C++ object. Users should provide at least one +/// of these using `py::init` or directly with `.def(__init__, ...)`. Otherwise, the +/// following default function will be used which simply throws an exception. 
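A minimal sketch of the rule just stated (the Point type and module name are illustrative): binding at least one constructor with py::init installs a real __init__, so the fallback below is never reached.

#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Point {
  double x, y;
  Point(double x_, double y_) : x(x_), y(y_) {}
};

PYBIND11_MODULE(example, m) {
  py::class_<Point>(m, "Point")
      .def(py::init<double, double>());
  // Without the .def(py::init<...>()) line, calling Point() from Python would reach
  // the default pybind11_object_init below and raise TypeError("... No constructor defined!").
}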
+extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) { + PyTypeObject *type = Py_TYPE(self); + std::string msg = get_fully_qualified_tp_name(type) + ": No constructor defined!"; + set_error(PyExc_TypeError, msg.c_str()); + return -1; +} + +inline void add_patient(PyObject *nurse, PyObject *patient) { + auto &internals = get_internals(); + auto *instance = reinterpret_cast(nurse); + instance->has_patients = true; + Py_INCREF(patient); + internals.patients[nurse].push_back(patient); +} + +inline void clear_patients(PyObject *self) { + auto *instance = reinterpret_cast(self); + auto &internals = get_internals(); + auto pos = internals.patients.find(self); + assert(pos != internals.patients.end()); + // Clearing the patients can cause more Python code to run, which + // can invalidate the iterator. Extract the vector of patients + // from the unordered_map first. + auto patients = std::move(pos->second); + internals.patients.erase(pos); + instance->has_patients = false; + for (PyObject *&patient : patients) { + Py_CLEAR(patient); + } +} + +/// Clears all internal data from the instance and removes it from registered instances in +/// preparation for deallocation. +inline void clear_instance(PyObject *self) { + auto *instance = reinterpret_cast(self); + + // Deallocate any values/holders, if present: + for (auto &v_h : values_and_holders(instance)) { + if (v_h) { + + // We have to deregister before we call dealloc because, for virtual MI types, we still + // need to be able to get the parent pointers. + if (v_h.instance_registered() + && !deregister_instance(instance, v_h.value_ptr(), v_h.type)) { + pybind11_fail( + "pybind11_object_dealloc(): Tried to deallocate unregistered instance!"); + } + + if (instance->owned || v_h.holder_constructed()) { + v_h.type->dealloc(v_h); + } + } + } + // Deallocate the value/holder layout internals: + instance->deallocate_layout(); + + if (instance->weakrefs) { + PyObject_ClearWeakRefs(self); + } + + PyObject **dict_ptr = _PyObject_GetDictPtr(self); + if (dict_ptr) { + Py_CLEAR(*dict_ptr); + } + + if (instance->has_patients) { + clear_patients(self); + } +} + +/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc` +/// to destroy the C++ object itself, while the rest is Python bookkeeping. +extern "C" inline void pybind11_object_dealloc(PyObject *self) { + auto *type = Py_TYPE(self); + + // If this is a GC tracked object, untrack it first + // Note that the track call is implicitly done by the + // default tp_alloc, which we never override. + if (PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC) != 0) { + PyObject_GC_UnTrack(self); + } + + clear_instance(self); + + type->tp_free(self); + +#if PY_VERSION_HEX < 0x03080000 + // `type->tp_dealloc != pybind11_object_dealloc` means that we're being called + // as part of a derived type's dealloc, in which case we're not allowed to decref + // the type here. For cross-module compatibility, we shouldn't compare directly + // with `pybind11_object_dealloc`, but with the common one stashed in internals. + auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base; + if (type->tp_dealloc == pybind11_object_type->tp_dealloc) + Py_DECREF(type); +#else + // This was not needed before Python 3.8 (Python issue 35810) + // https://github.com/pybind/pybind11/issues/1946 + Py_DECREF(type); +#endif +} + +std::string error_string(); + +/** Create the type which can be used as a common base for all classes. 
This is + needed in order to satisfy Python's requirements for multiple inheritance. + Return value: New reference. */ +inline PyObject *make_object_base_type(PyTypeObject *metaclass) { + constexpr auto *name = "pybind11_object"; + auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); + + /* Danger zone: from now (and until PyType_Ready), make sure to + issue no Python C API calls which could potentially invoke the + garbage collector (the GC will call type_traverse(), which will in + turn find the newly constructed type in an invalid state) */ + auto *heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0); + if (!heap_type) { + pybind11_fail("make_object_base_type(): error allocating type!"); + } + + heap_type->ht_name = name_obj.inc_ref().ptr(); +#ifdef PYBIND11_BUILTIN_QUALNAME + heap_type->ht_qualname = name_obj.inc_ref().ptr(); +#endif + + auto *type = &heap_type->ht_type; + type->tp_name = name; + type->tp_base = type_incref(&PyBaseObject_Type); + type->tp_basicsize = static_cast(sizeof(instance)); + type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + + type->tp_new = pybind11_object_new; + type->tp_init = pybind11_object_init; + type->tp_dealloc = pybind11_object_dealloc; + + /* Support weak references (needed for the keep_alive feature) */ + type->tp_weaklistoffset = offsetof(instance, weakrefs); + + if (PyType_Ready(type) < 0) { + pybind11_fail("PyType_Ready failed in make_object_base_type(): " + error_string()); + } + + setattr((PyObject *) type, "__module__", str("pybind11_builtins")); + PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); + + assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)); + return (PyObject *) heap_type; +} + +/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`. +extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) { +#if PY_VERSION_HEX >= 0x030D0000 + PyObject_VisitManagedDict(self, visit, arg); +#else + PyObject *&dict = *_PyObject_GetDictPtr(self); + Py_VISIT(dict); +#endif +// https://docs.python.org/3/c-api/typeobj.html#c.PyTypeObject.tp_traverse +#if PY_VERSION_HEX >= 0x03090000 + Py_VISIT(Py_TYPE(self)); +#endif + return 0; +} + +/// dynamic_attr: Allow the GC to clear the dictionary. +extern "C" inline int pybind11_clear(PyObject *self) { +#if PY_VERSION_HEX >= 0x030D0000 + PyObject_ClearManagedDict(self); +#else + PyObject *&dict = *_PyObject_GetDictPtr(self); + Py_CLEAR(dict); +#endif + return 0; +} + +/// Give instances of this type a `__dict__` and opt into garbage collection. +inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) { + auto *type = &heap_type->ht_type; + type->tp_flags |= Py_TPFLAGS_HAVE_GC; +#if PY_VERSION_HEX < 0x030B0000 + type->tp_dictoffset = type->tp_basicsize; // place dict at the end + type->tp_basicsize += (ssize_t) sizeof(PyObject *); // and allocate enough space for it +#else + type->tp_flags |= Py_TPFLAGS_MANAGED_DICT; +#endif + type->tp_traverse = pybind11_traverse; + type->tp_clear = pybind11_clear; + + static PyGetSetDef getset[] = {{ +#if PY_VERSION_HEX < 0x03070000 + const_cast("__dict__"), +#else + "__dict__", +#endif + PyObject_GenericGetDict, + PyObject_GenericSetDict, + nullptr, + nullptr}, + {nullptr, nullptr, nullptr, nullptr, nullptr}}; + type->tp_getset = getset; +} + +/// buffer_protocol: Fill in the view as specified by flags. 
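The usage that exercises pybind11_getbuffer below looks roughly like this (the Image struct and its layout are illustrative; the py::buffer_info fields follow the standard pybind11 pattern):

#include <pybind11/pybind11.h>
#include <cstddef>
#include <vector>
namespace py = pybind11;

struct Image {
  std::vector<float> data;
  std::size_t rows = 0, cols = 0;
};

PYBIND11_MODULE(example, m) {
  py::class_<Image>(m, "Image", py::buffer_protocol())
      .def_buffer([](Image &img) -> py::buffer_info {
        return py::buffer_info(
            img.data.data(),                          // pointer to the storage
            sizeof(float),                            // itemsize in bytes
            py::format_descriptor<float>::format(),   // struct-style format ("f")
            2,                                        // number of dimensions
            {img.rows, img.cols},                     // shape
            {sizeof(float) * img.cols, sizeof(float)} // strides in bytes (row-major)
        );
      });
  // memoryview(img) or numpy.asarray(img) in Python ends up in pybind11_getbuffer(),
  // which forwards to this get_buffer implementation via the registered type_info.
}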
+extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) { + // Look for a `get_buffer` implementation in this type's info or any bases (following MRO). + type_info *tinfo = nullptr; + for (auto type : reinterpret_borrow(Py_TYPE(obj)->tp_mro)) { + tinfo = get_type_info((PyTypeObject *) type.ptr()); + if (tinfo && tinfo->get_buffer) { + break; + } + } + if (view == nullptr || !tinfo || !tinfo->get_buffer) { + if (view) { + view->obj = nullptr; + } + set_error(PyExc_BufferError, "pybind11_getbuffer(): Internal error"); + return -1; + } + std::memset(view, 0, sizeof(Py_buffer)); + buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data); + if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) { + delete info; + // view->obj = nullptr; // Was just memset to 0, so not necessary + set_error(PyExc_BufferError, "Writable buffer requested for readonly storage"); + return -1; + } + view->obj = obj; + view->ndim = 1; + view->internal = info; + view->buf = info->ptr; + view->itemsize = info->itemsize; + view->len = view->itemsize; + for (auto s : info->shape) { + view->len *= s; + } + view->readonly = static_cast(info->readonly); + if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) { + view->format = const_cast(info->format.c_str()); + } + if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) { + view->ndim = (int) info->ndim; + view->strides = info->strides.data(); + view->shape = info->shape.data(); + } + Py_INCREF(view->obj); + return 0; +} + +/// buffer_protocol: Release the resources of the buffer. +extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) { + delete (buffer_info *) view->internal; +} + +/// Give this type a buffer interface. +inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) { + heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer; + + heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer; + heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer; +} + +/** Create a brand new Python type according to the `type_record` specification. + Return value: New reference. */ +inline PyObject *make_new_python_type(const type_record &rec) { + auto name = reinterpret_steal(PYBIND11_FROM_STRING(rec.name)); + + auto qualname = name; + if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, "__qualname__")) { + qualname = reinterpret_steal( + PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr())); + } + + object module_; + if (rec.scope) { + if (hasattr(rec.scope, "__module__")) { + module_ = rec.scope.attr("__module__"); + } else if (hasattr(rec.scope, "__name__")) { + module_ = rec.scope.attr("__name__"); + } + } + + const auto *full_name = c_str( +#if !defined(PYPY_VERSION) + module_ ? str(module_).cast() + "." + rec.name : +#endif + rec.name); + + char *tp_doc = nullptr; + if (rec.doc && options::show_user_defined_docstrings()) { + /* Allocate memory for docstring (using PyObject_MALLOC, since + Python will free this later on) */ + size_t size = std::strlen(rec.doc) + 1; + tp_doc = (char *) PyObject_MALLOC(size); + std::memcpy((void *) tp_doc, rec.doc, size); + } + + auto &internals = get_internals(); + auto bases = tuple(rec.bases); + auto *base = (bases.empty()) ? 
internals.instance_base : bases[0].ptr(); + + /* Danger zone: from now (and until PyType_Ready), make sure to + issue no Python C API calls which could potentially invoke the + garbage collector (the GC will call type_traverse(), which will in + turn find the newly constructed type in an invalid state) */ + auto *metaclass + = rec.metaclass.ptr() ? (PyTypeObject *) rec.metaclass.ptr() : internals.default_metaclass; + + auto *heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0); + if (!heap_type) { + pybind11_fail(std::string(rec.name) + ": Unable to create type object!"); + } + + heap_type->ht_name = name.release().ptr(); +#ifdef PYBIND11_BUILTIN_QUALNAME + heap_type->ht_qualname = qualname.inc_ref().ptr(); +#endif + + auto *type = &heap_type->ht_type; + type->tp_name = full_name; + type->tp_doc = tp_doc; + type->tp_base = type_incref((PyTypeObject *) base); + type->tp_basicsize = static_cast(sizeof(instance)); + if (!bases.empty()) { + type->tp_bases = bases.release().ptr(); + } + + /* Don't inherit base __init__ */ + type->tp_init = pybind11_object_init; + + /* Supported protocols */ + type->tp_as_number = &heap_type->as_number; + type->tp_as_sequence = &heap_type->as_sequence; + type->tp_as_mapping = &heap_type->as_mapping; + type->tp_as_async = &heap_type->as_async; + + /* Flags */ + type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE; + if (!rec.is_final) { + type->tp_flags |= Py_TPFLAGS_BASETYPE; + } + + if (rec.dynamic_attr) { + enable_dynamic_attributes(heap_type); + } + + if (rec.buffer_protocol) { + enable_buffer_protocol(heap_type); + } + + if (rec.custom_type_setup_callback) { + rec.custom_type_setup_callback(heap_type); + } + + if (PyType_Ready(type) < 0) { + pybind11_fail(std::string(rec.name) + ": PyType_Ready failed: " + error_string()); + } + + assert(!rec.dynamic_attr || PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)); + + /* Register type with the parent scope */ + if (rec.scope) { + setattr(rec.scope, rec.name, (PyObject *) type); + } else { + Py_INCREF(type); // Keep it alive forever (reference leak) + } + + if (module_) { // Needed by pydoc + setattr((PyObject *) type, "__module__", module_); + } + + PYBIND11_SET_OLDPY_QUALNAME(type, qualname); + + return (PyObject *) type; +} + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/pybind11/detail/common.h b/llmeval-env/lib/python3.10/site-packages/torch/include/pybind11/detail/common.h new file mode 100644 index 0000000000000000000000000000000000000000..454e6061b05ecda47c62d860173dc7faf5a00d89 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/pybind11/detail/common.h @@ -0,0 +1,1267 @@ +/* + pybind11/detail/common.h -- Basic macros + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#define PYBIND11_VERSION_MAJOR 2 +#define PYBIND11_VERSION_MINOR 12 +#define PYBIND11_VERSION_PATCH 0 + +// Similar to Python's convention: https://docs.python.org/3/c-api/apiabiversion.html +// Additional convention: 0xD = dev +#define PYBIND11_VERSION_HEX 0x020C0000 + +// Define some generic pybind11 helper macros for warning management. +// +// Note that compiler-specific push/pop pairs are baked into the +// PYBIND11_NAMESPACE_BEGIN/PYBIND11_NAMESPACE_END pair of macros. 
Therefore manual
+// PYBIND11_WARNING_PUSH/PYBIND11_WARNING_POP are usually only needed in `#include` sections.
+//
+// If you find you need to suppress a warning, please try to make the suppression as local as
+// possible using these macros. Please also be sure to push/pop with the pybind11 macros. Please
+// only use compiler specifics if you need to check specific versions, e.g. Apple Clang vs. vanilla
+// Clang.
+#if defined(_MSC_VER)
+#    define PYBIND11_COMPILER_MSVC
+#    define PYBIND11_PRAGMA(...) __pragma(__VA_ARGS__)
+#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(warning(push))
+#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(warning(pop))
+#elif defined(__INTEL_COMPILER)
+#    define PYBIND11_COMPILER_INTEL
+#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
+#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(warning push)
+#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(warning pop)
+#elif defined(__clang__)
+#    define PYBIND11_COMPILER_CLANG
+#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
+#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(clang diagnostic push)
+#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(clang diagnostic pop)
+#elif defined(__GNUC__)
+#    define PYBIND11_COMPILER_GCC
+#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
+#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(GCC diagnostic push)
+#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(GCC diagnostic pop)
+#endif
+
+#ifdef PYBIND11_COMPILER_MSVC
+#    define PYBIND11_WARNING_DISABLE_MSVC(name) PYBIND11_PRAGMA(warning(disable : name))
+#else
+#    define PYBIND11_WARNING_DISABLE_MSVC(name)
+#endif
+
+#ifdef PYBIND11_COMPILER_CLANG
+#    define PYBIND11_WARNING_DISABLE_CLANG(name) PYBIND11_PRAGMA(clang diagnostic ignored name)
+#else
+#    define PYBIND11_WARNING_DISABLE_CLANG(name)
+#endif
+
+#ifdef PYBIND11_COMPILER_GCC
+#    define PYBIND11_WARNING_DISABLE_GCC(name) PYBIND11_PRAGMA(GCC diagnostic ignored name)
+#else
+#    define PYBIND11_WARNING_DISABLE_GCC(name)
+#endif
+
+#ifdef PYBIND11_COMPILER_INTEL
+#    define PYBIND11_WARNING_DISABLE_INTEL(name) PYBIND11_PRAGMA(warning disable name)
+#else
+#    define PYBIND11_WARNING_DISABLE_INTEL(name)
+#endif
+
+#define PYBIND11_NAMESPACE_BEGIN(name) \
+    namespace name {                   \
+    PYBIND11_WARNING_PUSH
+
+#define PYBIND11_NAMESPACE_END(name) \
+    PYBIND11_WARNING_POP             \
+    }
+
+// Robust support for some features and loading modules compiled against different pybind versions
+// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute
+// on the main `pybind11` namespace.
+#if !defined(PYBIND11_NAMESPACE)
+#    ifdef __GNUG__
+#        define PYBIND11_NAMESPACE pybind11 __attribute__((visibility("hidden")))
+#    else
+#        define PYBIND11_NAMESPACE pybind11
+#    endif
+#endif
+
+#if !(defined(_MSC_VER) && __cplusplus == 199711L)
+#    if __cplusplus >= 201402L
+#        define PYBIND11_CPP14
+#        if __cplusplus >= 201703L
+#            define PYBIND11_CPP17
+#            if __cplusplus >= 202002L
+#                define PYBIND11_CPP20
+// Please update tests/pybind11_tests.cpp `cpp_std()` when adding a macro here.
+#            endif
+#        endif
+#    endif
+#elif defined(_MSC_VER) && __cplusplus == 199711L
+// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully
+// implemented). Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3
+// or newer.
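// --- Editorial illustration (not part of this header) ----------------------------------------
// How the PYBIND11_WARNING_* macros defined above are intended to be used: keep any
// suppression as local as possible by pairing push/pop around the offending code. The
// specific warning IDs/names below are arbitrary examples, not suppressions pybind11 applies.
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4127)                // e.g. "conditional expression is constant"
PYBIND11_WARNING_DISABLE_GCC("-Wold-style-cast")
PYBIND11_WARNING_DISABLE_CLANG("-Wold-style-cast")
// ... code that would otherwise trigger the warning goes here ...
PYBIND11_WARNING_POP
// --- end editorial illustration ---------------------------------------------------------------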
+# if _MSVC_LANG >= 201402L +# define PYBIND11_CPP14 +# if _MSVC_LANG > 201402L +# define PYBIND11_CPP17 +# if _MSVC_LANG >= 202002L +# define PYBIND11_CPP20 +# endif +# endif +# endif +#endif + +#if defined(PYBIND11_CPP20) +# define PYBIND11_CONSTINIT constinit +# define PYBIND11_DTOR_CONSTEXPR constexpr +#else +# define PYBIND11_CONSTINIT +# define PYBIND11_DTOR_CONSTEXPR +#endif + +// Compiler version assertions +#if defined(__INTEL_COMPILER) +# if __INTEL_COMPILER < 1800 +# error pybind11 requires Intel C++ compiler v18 or newer +# elif __INTEL_COMPILER < 1900 && defined(PYBIND11_CPP14) +# error pybind11 supports only C++11 with Intel C++ compiler v18. Use v19 or newer for C++14. +# endif +/* The following pragma cannot be pop'ed: + https://community.intel.com/t5/Intel-C-Compiler/Inline-and-no-inline-warning/td-p/1216764 */ +# pragma warning disable 2196 // warning #2196: routine is both "inline" and "noinline" +#elif defined(__clang__) && !defined(__apple_build_version__) +# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3) +# error pybind11 requires clang 3.3 or newer +# endif +#elif defined(__clang__) +// Apple changes clang version macros to its Xcode version; the first Xcode release based on +// (upstream) clang 3.3 was Xcode 5: +# if __clang_major__ < 5 +# error pybind11 requires Xcode/clang 5.0 or newer +# endif +#elif defined(__GNUG__) +# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8) +# error pybind11 requires gcc 4.8 or newer +# endif +#elif defined(_MSC_VER) +# if _MSC_VER < 1910 +# error pybind11 2.10+ requires MSVC 2017 or newer +# endif +#endif + +#if !defined(PYBIND11_EXPORT) +# if defined(WIN32) || defined(_WIN32) +# define PYBIND11_EXPORT __declspec(dllexport) +# else +# define PYBIND11_EXPORT __attribute__((visibility("default"))) +# endif +#endif + +#if !defined(PYBIND11_EXPORT_EXCEPTION) +# if defined(__apple_build_version__) +# define PYBIND11_EXPORT_EXCEPTION PYBIND11_EXPORT +# else +# define PYBIND11_EXPORT_EXCEPTION +# endif +#endif + +// For CUDA, GCC7, GCC8: +// PYBIND11_NOINLINE_FORCED is incompatible with `-Wattributes -Werror`. +// When defining PYBIND11_NOINLINE_FORCED, it is best to also use `-Wno-attributes`. +// However, the measured shared-library size saving when using noinline are only +// 1.7% for CUDA, -0.2% for GCC7, and 0.0% for GCC8 (using -DCMAKE_BUILD_TYPE=MinSizeRel, +// the default under pybind11/tests). +#if !defined(PYBIND11_NOINLINE_FORCED) \ + && (defined(__CUDACC__) || (defined(__GNUC__) && (__GNUC__ == 7 || __GNUC__ == 8))) +# define PYBIND11_NOINLINE_DISABLED +#endif + +// The PYBIND11_NOINLINE macro is for function DEFINITIONS. +// In contrast, FORWARD DECLARATIONS should never use this macro: +// https://stackoverflow.com/questions/9317473/forward-declaration-of-inline-functions +#if defined(PYBIND11_NOINLINE_DISABLED) // Option for maximum portability and experimentation. 
+# define PYBIND11_NOINLINE inline +#elif defined(_MSC_VER) +# define PYBIND11_NOINLINE __declspec(noinline) inline +#else +# define PYBIND11_NOINLINE __attribute__((noinline)) inline +#endif + +#if defined(__MINGW32__) +// For unknown reasons all PYBIND11_DEPRECATED member trigger a warning when declared +// whether it is used or not +# define PYBIND11_DEPRECATED(reason) +#elif defined(PYBIND11_CPP14) +# define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]] +#else +# define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason))) +#endif + +#if defined(PYBIND11_CPP17) +# define PYBIND11_MAYBE_UNUSED [[maybe_unused]] +#elif defined(_MSC_VER) && !defined(__clang__) +# define PYBIND11_MAYBE_UNUSED +#else +# define PYBIND11_MAYBE_UNUSED __attribute__((__unused__)) +#endif + +/* Don't let Python.h #define (v)snprintf as macro because they are implemented + properly in Visual Studio since 2015. */ +#if defined(_MSC_VER) +# define HAVE_SNPRINTF 1 +#endif + +/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode +#if defined(_MSC_VER) +PYBIND11_WARNING_PUSH +PYBIND11_WARNING_DISABLE_MSVC(4505) +// C4505: 'PySlice_GetIndicesEx': unreferenced local function has been removed (PyPy only) +# if defined(_DEBUG) && !defined(Py_DEBUG) +// Workaround for a VS 2022 issue. +// NOTE: This workaround knowingly violates the Python.h include order requirement: +// https://docs.python.org/3/c-api/intro.html#include-files +// See https://github.com/pybind/pybind11/pull/3497 for full context. +# include +# if _MSVC_STL_VERSION >= 143 +# include +# endif +# define PYBIND11_DEBUG_MARKER +# undef _DEBUG +# endif +#endif + +// https://en.cppreference.com/w/c/chrono/localtime +#if defined(__STDC_LIB_EXT1__) && !defined(__STDC_WANT_LIB_EXT1__) +# define __STDC_WANT_LIB_EXT1__ +#endif + +#ifdef __has_include +// std::optional (but including it in c++14 mode isn't allowed) +# if defined(PYBIND11_CPP17) && __has_include() +# define PYBIND11_HAS_OPTIONAL 1 +# endif +// std::experimental::optional (but not allowed in c++11 mode) +# if defined(PYBIND11_CPP14) && (__has_include() && \ + !__has_include()) +# define PYBIND11_HAS_EXP_OPTIONAL 1 +# endif +// std::variant +# if defined(PYBIND11_CPP17) && __has_include() +# define PYBIND11_HAS_VARIANT 1 +# endif +#elif defined(_MSC_VER) && defined(PYBIND11_CPP17) +# define PYBIND11_HAS_OPTIONAL 1 +# define PYBIND11_HAS_VARIANT 1 +#endif + +#if defined(PYBIND11_CPP17) +# if defined(__has_include) +# if __has_include() +# define PYBIND11_HAS_STRING_VIEW +# endif +# elif defined(_MSC_VER) +# define PYBIND11_HAS_STRING_VIEW +# endif +#endif + +#include +// Reminder: WITH_THREAD is always defined if PY_VERSION_HEX >= 0x03070000 +#if PY_VERSION_HEX < 0x03060000 +# error "PYTHON < 3.6 IS UNSUPPORTED. pybind11 v2.9 was the last to support Python 2 and 3.5." 
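// --- Editorial illustration (not part of this header) ----------------------------------------
// Typical consumption of the macros defined above: PYBIND11_NOINLINE decorates a function
// *definition* (never a forward declaration), PYBIND11_MAYBE_UNUSED silences unused-entity
// warnings, and the PYBIND11_HAS_* feature macros gate optional standard-library support.
// `demo_is_empty` is a hypothetical helper for illustration only, not pybind11 API.
#if defined(PYBIND11_HAS_STRING_VIEW)
#    include <string_view>
PYBIND11_MAYBE_UNUSED PYBIND11_NOINLINE bool demo_is_empty(std::string_view name) {
    return name.empty(); // kept out of line; noinline keeps generated code size predictable
}
#endif
// --- end editorial illustration ---------------------------------------------------------------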
+#endif +#include +#include + +/* Python #defines overrides on all sorts of core functions, which + tends to weak havok in C++ codebases that expect these to work + like regular functions (potentially with several overloads) */ +#if defined(isalnum) +# undef isalnum +# undef isalpha +# undef islower +# undef isspace +# undef isupper +# undef tolower +# undef toupper +#endif + +#if defined(copysign) +# undef copysign +#endif + +#if defined(PYBIND11_NUMPY_1_ONLY) +# define PYBIND11_INTERNAL_NUMPY_1_ONLY_DETECTED +#endif + +#if defined(PYPY_VERSION) && !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT) +# define PYBIND11_SIMPLE_GIL_MANAGEMENT +#endif + +#if defined(_MSC_VER) +# if defined(PYBIND11_DEBUG_MARKER) +# define _DEBUG +# undef PYBIND11_DEBUG_MARKER +# endif +PYBIND11_WARNING_POP +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(__has_include) +# if __has_include() +# include +# endif +#endif + +// Must be after including or one of the other headers specified by the standard +#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L +# define PYBIND11_HAS_U8STRING +#endif + +// See description of PR #4246: +#if !defined(PYBIND11_NO_ASSERT_GIL_HELD_INCREF_DECREF) && !defined(NDEBUG) \ + && !defined(PYPY_VERSION) && !defined(PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF) +# define PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF +#endif + +// #define PYBIND11_STR_LEGACY_PERMISSIVE +// If DEFINED, pybind11::str can hold PyUnicodeObject or PyBytesObject +// (probably surprising and never documented, but this was the +// legacy behavior until and including v2.6.x). As a side-effect, +// pybind11::isinstance() is true for both pybind11::str and +// pybind11::bytes. +// If UNDEFINED, pybind11::str can only hold PyUnicodeObject, and +// pybind11::isinstance() is true only for pybind11::str. +// However, for Python 2 only (!), the pybind11::str caster +// implicitly decoded bytes to PyUnicodeObject. This was to ease +// the transition from the legacy behavior to the non-permissive +// behavior. + +/// Compatibility macros for Python 2 / Python 3 versions TODO: remove +#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr) +#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check +#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION +#define PYBIND11_BYTES_CHECK PyBytes_Check +#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString +#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize +#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize +#define PYBIND11_BYTES_AS_STRING PyBytes_AsString +#define PYBIND11_BYTES_SIZE PyBytes_Size +#define PYBIND11_LONG_CHECK(o) PyLong_Check(o) +#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o) +#define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) (o)) +#define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) (o)) +#define PYBIND11_BYTES_NAME "bytes" +#define PYBIND11_STRING_NAME "str" +#define PYBIND11_SLICE_OBJECT PyObject +#define PYBIND11_FROM_STRING PyUnicode_FromString +#define PYBIND11_STR_TYPE ::pybind11::str +#define PYBIND11_BOOL_ATTR "__bool__" +#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool) +#define PYBIND11_BUILTINS_MODULE "builtins" +// Providing a separate declaration to make Clang's -Wmissing-prototypes happy. +// See comment for PYBIND11_MODULE below for why this is marked "maybe unused". 
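// --- Editorial illustration (not part of this header) ----------------------------------------
// The PYBIND11_BYTES_* / PYBIND11_LONG_* aliases defined above map directly onto the CPython
// C API (PyBytes_*, PyLong_*). A minimal sketch of a bytes round trip; `demo_roundtrip` is a
// hypothetical helper for illustration only, not pybind11 API.
inline bool demo_roundtrip() {
    PyObject *raw = PYBIND11_BYTES_FROM_STRING_AND_SIZE("abc", 3); // PyBytes_FromStringAndSize
    char *data = nullptr;
    Py_ssize_t size = 0;
    bool ok = raw != nullptr && PYBIND11_BYTES_AS_STRING_AND_SIZE(raw, &data, &size) == 0
              && size == 3;                                        // 0 return means success
    Py_XDECREF(raw);
    return ok;
}
// --- end editorial illustration ---------------------------------------------------------------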
+#define PYBIND11_PLUGIN_IMPL(name) \ + extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name(); \ + extern "C" PYBIND11_EXPORT PyObject *PyInit_##name() + +#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code +#define PYBIND11_STRINGIFY(x) #x +#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x) +#define PYBIND11_CONCAT(first, second) first##second +#define PYBIND11_ENSURE_INTERNALS_READY pybind11::detail::get_internals(); + +#define PYBIND11_CHECK_PYTHON_VERSION \ + { \ + const char *compiled_ver \ + = PYBIND11_TOSTRING(PY_MAJOR_VERSION) "." PYBIND11_TOSTRING(PY_MINOR_VERSION); \ + const char *runtime_ver = Py_GetVersion(); \ + size_t len = std::strlen(compiled_ver); \ + if (std::strncmp(runtime_ver, compiled_ver, len) != 0 \ + || (runtime_ver[len] >= '0' && runtime_ver[len] <= '9')) { \ + PyErr_Format(PyExc_ImportError, \ + "Python version mismatch: module was compiled for Python %s, " \ + "but the interpreter version is incompatible: %s.", \ + compiled_ver, \ + runtime_ver); \ + return nullptr; \ + } \ + } + +#define PYBIND11_CATCH_INIT_EXCEPTIONS \ + catch (pybind11::error_already_set & e) { \ + pybind11::raise_from(e, PyExc_ImportError, "initialization failed"); \ + return nullptr; \ + } \ + catch (const std::exception &e) { \ + ::pybind11::set_error(PyExc_ImportError, e.what()); \ + return nullptr; \ + } + +/** \rst + ***Deprecated in favor of PYBIND11_MODULE*** + + This macro creates the entry point that will be invoked when the Python interpreter + imports a plugin library. Please create a `module_` in the function body and return + the pointer to its underlying Python object at the end. + + .. code-block:: cpp + + PYBIND11_PLUGIN(example) { + pybind11::module_ m("example", "pybind11 example plugin"); + /// Set up bindings here + return m.ptr(); + } +\endrst */ +#define PYBIND11_PLUGIN(name) \ + PYBIND11_DEPRECATED("PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE") \ + static PyObject *pybind11_init(); \ + PYBIND11_PLUGIN_IMPL(name) { \ + PYBIND11_CHECK_PYTHON_VERSION \ + PYBIND11_ENSURE_INTERNALS_READY \ + try { \ + return pybind11_init(); \ + } \ + PYBIND11_CATCH_INIT_EXCEPTIONS \ + } \ + PyObject *pybind11_init() + +/** \rst + This macro creates the entry point that will be invoked when the Python interpreter + imports an extension module. The module name is given as the first argument and it + should not be in quotes. The second macro argument defines a variable of type + `py::module_` which can be used to initialize the module. + + The entry point is marked as "maybe unused" to aid dead-code detection analysis: + since the entry point is typically only looked up at runtime and not referenced + during translation, it would otherwise appear as unused ("dead") code. + + .. 
code-block:: cpp + + PYBIND11_MODULE(example, m) { + m.doc() = "pybind11 example module"; + + // Add bindings here + m.def("foo", []() { + return "Hello, World!"; + }); + } +\endrst */ +#define PYBIND11_MODULE(name, variable) \ + static ::pybind11::module_::module_def PYBIND11_CONCAT(pybind11_module_def_, name) \ + PYBIND11_MAYBE_UNUSED; \ + PYBIND11_MAYBE_UNUSED \ + static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \ + PYBIND11_PLUGIN_IMPL(name) { \ + PYBIND11_CHECK_PYTHON_VERSION \ + PYBIND11_ENSURE_INTERNALS_READY \ + auto m = ::pybind11::module_::create_extension_module( \ + PYBIND11_TOSTRING(name), nullptr, &PYBIND11_CONCAT(pybind11_module_def_, name)); \ + try { \ + PYBIND11_CONCAT(pybind11_init_, name)(m); \ + return m.ptr(); \ + } \ + PYBIND11_CATCH_INIT_EXCEPTIONS \ + } \ + void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ & (variable)) + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +using ssize_t = Py_ssize_t; +using size_t = std::size_t; + +template +inline ssize_t ssize_t_cast(const IntType &val) { + static_assert(sizeof(IntType) <= sizeof(ssize_t), "Implicit narrowing is not permitted."); + return static_cast(val); +} + +/// Approach used to cast a previously unknown C++ instance into a Python object +enum class return_value_policy : uint8_t { + /** This is the default return value policy, which falls back to the policy + return_value_policy::take_ownership when the return value is a pointer. + Otherwise, it uses return_value::move or return_value::copy for rvalue + and lvalue references, respectively. See below for a description of what + all of these different policies do. */ + automatic = 0, + + /** As above, but use policy return_value_policy::reference when the return + value is a pointer. This is the default conversion policy for function + arguments when calling Python functions manually from C++ code (i.e. via + handle::operator()). You probably won't need to use this. */ + automatic_reference, + + /** Reference an existing object (i.e. do not create a new copy) and take + ownership. Python will call the destructor and delete operator when the + object's reference count reaches zero. Undefined behavior ensues when + the C++ side does the same.. */ + take_ownership, + + /** Create a new copy of the returned object, which will be owned by + Python. This policy is comparably safe because the lifetimes of the two + instances are decoupled. */ + copy, + + /** Use std::move to move the return value contents into a new instance + that will be owned by Python. This policy is comparably safe because the + lifetimes of the two instances (move source and destination) are + decoupled. */ + move, + + /** Reference an existing object, but do not take ownership. The C++ side + is responsible for managing the object's lifetime and deallocating it + when it is no longer used. Warning: undefined behavior will ensue when + the C++ side deletes an object that is still referenced and used by + Python. */ + reference, + + /** This policy only applies to methods and properties. It references the + object without taking ownership similar to the above + return_value_policy::reference policy. In contrast to that policy, the + function or property's implicit this argument (called the parent) is + considered to be the the owner of the return value (the child). + pybind11 then couples the lifetime of the parent to the child via a + reference relationship that ensures that the parent cannot be garbage + collected while Python is still using the child. 
More advanced + variations of this scheme are also possible using combinations of + return_value_policy::reference and the keep_alive call policy */ + reference_internal +}; + +PYBIND11_NAMESPACE_BEGIN(detail) + +inline static constexpr int log2(size_t n, int k = 0) { + return (n <= 1) ? k : log2(n >> 1, k + 1); +} + +// Returns the size as a multiple of sizeof(void *), rounded up. +inline static constexpr size_t size_in_ptrs(size_t s) { + return 1 + ((s - 1) >> log2(sizeof(void *))); +} + +/** + * The space to allocate for simple layout instance holders (see below) in multiple of the size of + * a pointer (e.g. 2 means 16 bytes on 64-bit architectures). The default is the minimum required + * to holder either a std::unique_ptr or std::shared_ptr (which is almost always + * sizeof(std::shared_ptr)). + */ +constexpr size_t instance_simple_holder_in_ptrs() { + static_assert(sizeof(std::shared_ptr) >= sizeof(std::unique_ptr), + "pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs"); + return size_in_ptrs(sizeof(std::shared_ptr)); +} + +// Forward declarations +struct type_info; +struct value_and_holder; + +struct nonsimple_values_and_holders { + void **values_and_holders; + uint8_t *status; +}; + +/// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof') +struct instance { + PyObject_HEAD + /// Storage for pointers and holder; see simple_layout, below, for a description + union { + void *simple_value_holder[1 + instance_simple_holder_in_ptrs()]; + nonsimple_values_and_holders nonsimple; + }; + /// Weak references + PyObject *weakrefs; + /// If true, the pointer is owned which means we're free to manage it with a holder. + bool owned : 1; + /** + * An instance has two possible value/holder layouts. + * + * Simple layout (when this flag is true), means the `simple_value_holder` is set with a + * pointer and the holder object governing that pointer, i.e. [val1*][holder]. This layout is + * applied whenever there is no python-side multiple inheritance of bound C++ types *and* the + * type's holder will fit in the default space (which is large enough to hold either a + * std::unique_ptr or std::shared_ptr). + * + * Non-simple layout applies when using custom holders that require more space than + * `shared_ptr` (which is typically the size of two pointers), or when multiple inheritance is + * used on the python side. Non-simple layout allocates the required amount of memory to have + * multiple bound C++ classes as parents. Under this layout, `nonsimple.values_and_holders` is + * set to a pointer to allocated space of the required space to hold a sequence of value + * pointers and holders followed `status`, a set of bit flags (1 byte each), i.e. + * [val1*][holder1][val2*][holder2]...[bb...] where each [block] is rounded up to a multiple + * of `sizeof(void *)`. `nonsimple.status` is, for convenience, a pointer to the beginning of + * the [bb...] block (but not independently allocated). + * + * Status bits indicate whether the associated holder is constructed (& + * status_holder_constructed) and whether the value pointer is registered (& + * status_instance_registered) in `registered_instances`. 
+ */ + bool simple_layout : 1; + /// For simple layout, tracks whether the holder has been constructed + bool simple_holder_constructed : 1; + /// For simple layout, tracks whether the instance is registered in `registered_instances` + bool simple_instance_registered : 1; + /// If true, get_internals().patients has an entry for this object + bool has_patients : 1; + + /// Initializes all of the above type/values/holders data (but not the instance values + /// themselves) + void allocate_layout(); + + /// Destroys/deallocates all of the above + void deallocate_layout(); + + /// Returns the value_and_holder wrapper for the given type (or the first, if `find_type` + /// omitted). Returns a default-constructed (with `.inst = nullptr`) object on failure if + /// `throw_if_missing` is false. + value_and_holder get_value_and_holder(const type_info *find_type = nullptr, + bool throw_if_missing = true); + + /// Bit values for the non-simple status flags + static constexpr uint8_t status_holder_constructed = 1; + static constexpr uint8_t status_instance_registered = 2; +}; + +static_assert(std::is_standard_layout::value, + "Internal error: `pybind11::detail::instance` is not standard layout!"); + +/// from __cpp_future__ import (convenient aliases from C++14/17) +#if defined(PYBIND11_CPP14) +using std::conditional_t; +using std::enable_if_t; +using std::remove_cv_t; +using std::remove_reference_t; +#else +template +using enable_if_t = typename std::enable_if::type; +template +using conditional_t = typename std::conditional::type; +template +using remove_cv_t = typename std::remove_cv::type; +template +using remove_reference_t = typename std::remove_reference::type; +#endif + +#if defined(PYBIND11_CPP20) +using std::remove_cvref; +using std::remove_cvref_t; +#else +template +struct remove_cvref { + using type = remove_cv_t>; +}; +template +using remove_cvref_t = typename remove_cvref::type; +#endif + +/// Example usage: is_same_ignoring_cvref::value +template +using is_same_ignoring_cvref = std::is_same, U>; + +/// Index sequences +#if defined(PYBIND11_CPP14) +using std::index_sequence; +using std::make_index_sequence; +#else +template +struct index_sequence {}; +template +struct make_index_sequence_impl : make_index_sequence_impl {}; +template +struct make_index_sequence_impl<0, S...> { + using type = index_sequence; +}; +template +using make_index_sequence = typename make_index_sequence_impl::type; +#endif + +/// Make an index sequence of the indices of true arguments +template +struct select_indices_impl { + using type = ISeq; +}; +template +struct select_indices_impl, I, B, Bs...> + : select_indices_impl, index_sequence>, + I + 1, + Bs...> {}; +template +using select_indices = typename select_indices_impl, 0, Bs...>::type; + +/// Backports of std::bool_constant and std::negation to accommodate older compilers +template +using bool_constant = std::integral_constant; +template +struct negation : bool_constant {}; + +// PGI/Intel cannot detect operator delete with the "compatible" void_t impl, so +// using the new one (C++14 defect, so generally works on newer compilers, even +// if not in C++17 mode) +#if defined(__PGIC__) || defined(__INTEL_COMPILER) +template +using void_t = void; +#else +template +struct void_t_impl { + using type = void; +}; +template +using void_t = typename void_t_impl::type; +#endif + +/// Compile-time all/any/none of that check the boolean value of all template types +#if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916)) +template +using all_of = 
bool_constant<(Ts::value && ...)>; +template +using any_of = bool_constant<(Ts::value || ...)>; +#elif !defined(_MSC_VER) +template +struct bools {}; +template +using all_of = std::is_same, bools>; +template +using any_of = negation...>>; +#else +// MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit +// at a slight loss of compilation efficiency). +template +using all_of = std::conjunction; +template +using any_of = std::disjunction; +#endif +template +using none_of = negation>; + +template class... Predicates> +using satisfies_all_of = all_of...>; +template class... Predicates> +using satisfies_any_of = any_of...>; +template class... Predicates> +using satisfies_none_of = none_of...>; + +/// Strip the class from a method type +template +struct remove_class {}; +template +struct remove_class { + using type = R(A...); +}; +template +struct remove_class { + using type = R(A...); +}; +#ifdef __cpp_noexcept_function_type +template +struct remove_class { + using type = R(A...); +}; +template +struct remove_class { + using type = R(A...); +}; +#endif +/// Helper template to strip away type modifiers +template +struct intrinsic_type { + using type = T; +}; +template +struct intrinsic_type { + using type = typename intrinsic_type::type; +}; +template +struct intrinsic_type { + using type = typename intrinsic_type::type; +}; +template +struct intrinsic_type { + using type = typename intrinsic_type::type; +}; +template +struct intrinsic_type { + using type = typename intrinsic_type::type; +}; +template +struct intrinsic_type { + using type = typename intrinsic_type::type; +}; +template +struct intrinsic_type { + using type = typename intrinsic_type::type; +}; +template +using intrinsic_t = typename intrinsic_type::type; + +/// Helper type to replace 'void' in some expressions +struct void_type {}; + +/// Helper template which holds a list of types +template +struct type_list {}; + +/// Compile-time integer sum +#ifdef __cpp_fold_expressions +template +constexpr size_t constexpr_sum(Ts... ns) { + return (0 + ... + size_t{ns}); +} +#else +constexpr size_t constexpr_sum() { return 0; } +template +constexpr size_t constexpr_sum(T n, Ts... ns) { + return size_t{n} + constexpr_sum(ns...); +} +#endif + +PYBIND11_NAMESPACE_BEGIN(constexpr_impl) +/// Implementation details for constexpr functions +constexpr int first(int i) { return i; } +template +constexpr int first(int i, T v, Ts... vs) { + return v ? i : first(i + 1, vs...); +} + +constexpr int last(int /*i*/, int result) { return result; } +template +constexpr int last(int i, int result, T v, Ts... vs) { + return last(i + 1, v ? i : result, vs...); +} +PYBIND11_NAMESPACE_END(constexpr_impl) + +/// Return the index of the first type in Ts which satisfies Predicate. +/// Returns sizeof...(Ts) if none match. +template