diff --git a/ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..afd4f7fd8680ad6298a2d4c7d0c0935ca75980e6 --- /dev/null +++ b/ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41dec19e41e01e697ba09437d7a19d3b6ced65f1bcf710af3d3ab2eb1aed5f70 +size 50332828 diff --git a/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..d9868d86c2f596adc7dae229af9f8b1d4aca34f7 --- /dev/null +++ b/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e896fd7e1e34ab0d6fed56670fcd8268239dc24e787e38f5b7d2366b0e9e09f +size 33555612 diff --git a/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..c121de5aec681f643d88c729ad1d6bb93fe33ac8 --- /dev/null +++ b/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8889a8cf12e2e8c498b8ede4eb6c879a6e49eba3d7eb7bfcf62cbbbac59d5018 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ATen.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ATen.h new file mode 100644 index 0000000000000000000000000000000000000000..effdd469d19b91316aa21ae99d43055f49c950eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ATen.h @@ -0,0 +1,37 @@ +#pragma once + +#if !defined(_MSC_VER) && __cplusplus < 201703L +#error C++17 or later compatible compiler is required to use ATen. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// TODO: try to remove this +// There is some back story, see https://github.com/pytorch/pytorch/issues/48684 +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h b/venv/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h new file mode 100644 index 0000000000000000000000000000000000000000..0275ef099b03d714b916b9d0d09c4827724bf58c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h @@ -0,0 +1,153 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Defines the accumulation type for a scalar type. +// Example: +// using accscalar_t = acc_type; +// +// Accumulation types are an important concept in numeric computing +// because you frequently want to perform intermediate computations +// at a higher precision than the input and output precision, to avoid +// compounding internal rounding errors. 
Accumulation is the most +// well-known intermediate computation (it is of great importance for +// sum reduction and matrix multiply, for example), but in PyTorch +// acc_type ends up getting used for all sorts of other intermediate +// computations, so it perhaps would be more accurately (ahem) called an +// "accurate" type. acc_type is especially important for reduced +// precision operations like float16 and bfloat16, where relatively +// benign looking inputs can easily end up overflowing/underflowing. +// +// acc_type is parametrized by whether or not you are running on CUDA +// or not, because on CUDA double precision operations are expensive +// and so by default, we don't actually want to use double as an +// acc_type on CUDA. A lot of things are typed out below, but +// basically, the table is generated by a few rules: +// +// If bool: +// Use 'bool' as acc_type. +// If floating point: +// If CUDA, use 'float' as acc_type (unless scalar_t is double), +// otherwise (CPU) use 'double' +// If integral: +// Use 'int64_t' as acc_type +// +// You're not forced to use this template; if you happen to know +// something specific about your use case, you can specify your own +// desired behavior. This template, however, will give you a reasonable +// default that will work for all dtypes supported in PyTorch. + +#if defined(__CUDACC__) +#include +#include +#elif defined(__HIPCC__) +#include +#include +#endif + +namespace at { + +template +struct AccumulateTypeDevice {}; + +template +struct AccumulateType {}; + +template +struct AccumulateType { + using type = typename AccumulateTypeDevice::type; +}; + +template +struct AccumulateType { + using type = typename AccumulateTypeDevice::type; +}; + +template +using acc_type_device = typename AccumulateTypeDevice::type; + +template +using acc_type = typename AccumulateType::type; + +#define ACC_TYPE(t, acc_t, device_type) \ + template <> \ + struct AccumulateTypeDevice { \ + using type = acc_t; \ + }; +#define MPS_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::MPS) +#define CUDA_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CUDA) +#define CPU_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CPU) + +MPS_ACC_TYPE(BFloat16, float); +MPS_ACC_TYPE(Half, float); +MPS_ACC_TYPE(Float8_e5m2, float); +MPS_ACC_TYPE(Float8_e4m3fn, float); +MPS_ACC_TYPE(Float8_e5m2fnuz, float); +MPS_ACC_TYPE(Float8_e4m3fnuz, float); +MPS_ACC_TYPE(float, float); +MPS_ACC_TYPE(double, float); +MPS_ACC_TYPE(int8_t, int64_t); +MPS_ACC_TYPE(uint8_t, int64_t); +MPS_ACC_TYPE(char, int64_t); +MPS_ACC_TYPE(int16_t, int64_t); +MPS_ACC_TYPE(int32_t, int64_t); +MPS_ACC_TYPE(int64_t, int64_t); +MPS_ACC_TYPE(bool, bool); +MPS_ACC_TYPE(c10::complex, c10::complex); +MPS_ACC_TYPE(c10::complex, c10::complex); +MPS_ACC_TYPE(c10::complex, c10::complex); + +#if defined(__CUDACC__) || defined(__HIPCC__) +CUDA_ACC_TYPE(half, float); +#endif +CUDA_ACC_TYPE(BFloat16, float); +CUDA_ACC_TYPE(Half, float); +CUDA_ACC_TYPE(Float8_e5m2, float); +CUDA_ACC_TYPE(Float8_e4m3fn, float); +CUDA_ACC_TYPE(Float8_e5m2fnuz, float); +CUDA_ACC_TYPE(Float8_e4m3fnuz, float); +CUDA_ACC_TYPE(float, float); +CUDA_ACC_TYPE(double, double); +CUDA_ACC_TYPE(int8_t, int64_t); +CUDA_ACC_TYPE(uint8_t, int64_t); +CUDA_ACC_TYPE(char, int64_t); +CUDA_ACC_TYPE(int16_t, int64_t); +CUDA_ACC_TYPE(int32_t, int64_t); +CUDA_ACC_TYPE(int64_t, int64_t); +CUDA_ACC_TYPE(bool, bool); +CUDA_ACC_TYPE(c10::complex, c10::complex); +CUDA_ACC_TYPE(c10::complex, c10::complex); +CUDA_ACC_TYPE(c10::complex, c10::complex); + 
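// --- Illustrative sketch, not part of the upstream header above ---
// In a translation unit that includes <ATen/AccumulateType.h> (plus
// <type_traits> for std::is_same_v), the rule table above resolves as
// shown below; these lines illustrate the specializations in this file
// and are not upstream code:
//
//   static_assert(std::is_same_v<at::acc_type<at::Half,     /*is_cuda=*/true>,  float>);
//   static_assert(std::is_same_v<at::acc_type<at::BFloat16, /*is_cuda=*/true>,  float>);
//   static_assert(std::is_same_v<at::acc_type<float,        /*is_cuda=*/false>, double>);
//   static_assert(std::is_same_v<at::acc_type<int32_t,      /*is_cuda=*/false>, int64_t>);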
+CPU_ACC_TYPE(BFloat16, float); +CPU_ACC_TYPE(Half, float); +CPU_ACC_TYPE(Float8_e5m2, float); +CPU_ACC_TYPE(Float8_e4m3fn, float); +CPU_ACC_TYPE(Float8_e5m2fnuz, float); +CPU_ACC_TYPE(Float8_e4m3fnuz, float); +CPU_ACC_TYPE(float, double); +CPU_ACC_TYPE(double, double); +CPU_ACC_TYPE(int8_t, int64_t); +CPU_ACC_TYPE(uint8_t, int64_t); +CPU_ACC_TYPE(char, int64_t); +CPU_ACC_TYPE(int16_t, int64_t); +CPU_ACC_TYPE(int32_t, int64_t); +CPU_ACC_TYPE(int64_t, int64_t); +CPU_ACC_TYPE(bool, bool); +CPU_ACC_TYPE(c10::complex, c10::complex); +CPU_ACC_TYPE(c10::complex, c10::complex); +CPU_ACC_TYPE(c10::complex, c10::complex); + +TORCH_API c10::ScalarType toAccumulateType( + c10::ScalarType type, + c10::DeviceType device); +TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda); + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Backend.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Backend.h new file mode 100644 index 0000000000000000000000000000000000000000..9651469e190085d913ba9b5d1ca02085886fc4e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Backend.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/CPUApplyUtils.h b/venv/lib/python3.10/site-packages/torch/include/ATen/CPUApplyUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..5c524ef97c475a0529b7b18c430be0d39c350aa4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/CPUApplyUtils.h @@ -0,0 +1,343 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace at { + +/* + * The basic strategy for apply is as follows: + * + * 1. Starting with the outermost index, loop until we reach a dimension where + * the data is no longer contiguous, i.e. the stride at that dimension is not + * equal to the size of the tensor defined by the outer dimensions. Let's call + * this outer (contiguous) tensor A. Note that if the Tensor is contiguous, then + * A is equal to the entire Tensor. Let's call the inner tensor B. + * + * 2. We loop through the indices in B, starting at its outermost dimension. For + * example, if B is a 2x2 matrix, then we do: + * + * B[0][0] + * B[0][1] + * B[1][0] + * B[1][1] + * + * We set the offset into the underlying storage as (storageOffset + stride_B * + * index_B), i.e. basically we compute the offset into the storage as we would + * normally for a Tensor. But because we are guaranteed the subsequent data is + * contiguous in memory, we can simply loop for sizeof(A) iterations and perform + * the operation, without having to follow the order described by the strides of + * A. + * + * 3. As an optimization, we merge dimensions of A that are contiguous in + * memory. For example, if A is a 3x3x3x3 tensor narrowed from a 3x3x4x3 tensor, + * then the first two dimensions can be merged for the purposes of APPLY, + * reducing the number of nested loops. 
+ */ + +inline Tensor sort_strides(Tensor& tensor_) { + IntArrayRef strides = tensor_.strides(); + std::vector indices; + indices.reserve(tensor_.ndimension()); + for (const auto i : c10::irange(tensor_.ndimension())) { + indices.push_back(i); + } + std::sort(indices.begin(), indices.end(), [&strides](int64_t i1, int64_t i2) { + return strides[i1] > strides[i2]; + }); + Tensor tensor = tensor_.permute(indices); + return tensor; +} + +template +struct strided_tensor_iter_fixed { + public: + T* data_ = NULL; + int64_t dim_ = 0; + + int64_t counter_[N] = {0}; + int64_t sizes_[N] = {0}; + int64_t strides_[N] = {0}; + + strided_tensor_iter_fixed(strided_tensor_iter_fixed const&) = delete; + void operator=(strided_tensor_iter_fixed const& x) = delete; + strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default; + strided_tensor_iter_fixed( + Tensor& tensor, + C10_UNUSED bool sort_strides = false) + : data_(tensor.data_ptr()) { + std::memset(counter_, 0, sizeof(int64_t) * N); + if (tensor.dim() > 0) { + std::memcpy( + sizes_, tensor.sizes().data(), tensor.dim() * sizeof(int64_t)); + std::memcpy( + strides_, tensor.strides().data(), tensor.dim() * sizeof(int64_t)); + } + dim_ = std::get<1>(collapse_dims(sizes_, strides_, tensor.ndimension())); + } +}; + +template +struct strided_tensor_iter { + private: + public: + T* data_ = NULL; + int64_t dim_; + + std::vector counter_; + std::vector sizes_; + std::vector strides_; + + strided_tensor_iter(strided_tensor_iter const&) = delete; + void operator=(strided_tensor_iter const& x) = delete; + strided_tensor_iter(strided_tensor_iter&&) = default; + strided_tensor_iter(Tensor& tensor) + : data_(tensor.data_ptr()), + dim_(tensor.ndimension()), + counter_(dim_, 0), + sizes_(tensor.sizes().vec()), + strides_(tensor.strides().vec()) { + dim_ = std::get<1>(collapse_dims(sizes_.data(), strides_.data(), dim_)); + } +}; + +inline bool _all_equal_numel(at::ArrayRef tensors) { + if (tensors.empty()) + return true; + int64_t all_numel = tensors[0].numel(); + for (const auto i : c10::irange(1, tensors.size())) { + if (tensors[i].numel() != all_numel) + return false; + } + return true; +} + +inline std::string _all_equal_numel_error(at::ArrayRef tensors) { + std::ostringstream oss; + oss << "inconsistent tensor size, expected "; + for (size_t i = 0; i < tensors.size() - 1; i++) { + oss << tensors[i].sizes() << ", "; + } + oss << "and " << tensors[tensors.size() - 1].sizes() + << " to have the same number of elements, but got "; + for (size_t i = 0; i < tensors.size() - 1; i++) { + oss << tensors[i].numel() << ", "; + } + oss << "and " << tensors[tensors.size() - 1].numel() + << " elements respectively"; + return oss.str(); +} + +inline bool _apply_preamble(ArrayRef tensors) { + checkDeviceType("CPU_tensor_apply", tensors, kCPU); + checkLayout("CPU_tensor_apply", tensors, kStrided); + if (!_all_equal_numel(tensors)) + AT_ERROR(_all_equal_numel_error(tensors)); + // An empty tensor has no elements + for (auto& t : tensors) + if (t.numel() == 0) + return false; + return true; +} + +inline int64_t _max_dim_tensors(ArrayRef tensors) { + int64_t dim = 0; + for (auto& t : tensors) + dim = std::max(dim, t.ndimension()); + return dim; +} + +inline void iterate(int64_t /*size*/){}; + +template +inline void iterate(int64_t size, Arg& iter, Args&... 
iter_tail) { + iter.counter_[iter.dim_ - 1] += size; + iter.data_ = iter.data_ + size * iter.strides_[iter.dim_ - 1]; + iterate(size, iter_tail...); +} + +inline bool iterate_continue() { + return true; +}; + +template +inline bool iterate_continue(Arg& iter, Args&... iter_tail) { + return iter.counter_[iter.dim_ - 1] < iter.sizes_[iter.dim_ - 1] && + iterate_continue(iter_tail...); +} + +inline int64_t max_iterate_size() { + return std::numeric_limits::max(); +}; + +template +inline int64_t max_iterate_size(Arg& iter, Args&... iter_tail) { + return std::min( + (iter.sizes_[iter.dim_ - 1] - iter.counter_[iter.dim_ - 1]), + max_iterate_size(iter_tail...)); +} + +inline void iterate_overflow(){}; + +template +inline void iterate_overflow(Arg& iter, Args&... iter_tail) { + if (iter.counter_[iter.dim_ - 1] == iter.sizes_[iter.dim_ - 1]) { + for (int64_t i = iter.dim_ - 1; i > 0; i--) { + if (iter.counter_[i] == iter.sizes_[i]) { + iter.counter_[i] = 0; + iter.counter_[i - 1]++; + iter.data_ = iter.data_ - (iter.sizes_[i] * iter.strides_[i]) + + iter.strides_[i - 1]; + } + } + } + iterate_overflow(iter_tail...); +} + +inline void forward(int64_t /*offset*/){}; + +template +inline void forward(int64_t offset, Arg& iter, Args&... iter_tail) { + int64_t multi = offset; + for (int64_t i = iter.dim_ - 1; i >= 0; i--) { + int64_t inc = multi % iter.sizes_[i]; + multi = multi / iter.sizes_[i]; + iter.data_ = iter.data_ + inc * iter.strides_[i]; + iter.counter_[i] += inc; + } + forward(offset, iter_tail...); +} + +inline int64_t max_dim() { + return 0; +} + +template +inline int64_t max_dim(Arg& iter, Args&... iter_tail) { + return std::max(iter.dim_, max_dim(iter_tail...)); +} + +inline void apply_op(){}; + +template +inline void apply_op( + int64_t numel, + int64_t offset, + const Op& op, + Args... iters) { + // For 0-dim tensors + if (numel == 1 && max_dim(iters...) == 0) { + op(*iters.data_...); + return; + } + if (offset > 0) + forward(offset, iters...); + // Splitting this into chunks helps the compiler create faster assembly + for (int64_t i = 0; i < numel;) { + for (; iterate_continue(iters...) && i < numel;) { + op(*iters.data_...); + iterate(1, iters...); + i++; + } + iterate_overflow(iters...); + } +} + +/* + Apply a pointwise operator to sequence of tensors + + The calling convention for op is a function/functor that takes the same + number of pointers of type scalar as the number of given tensors. 
For example, + to compute a = b * c, op would be of the form: + [](scalar* a_val, const scalar* b_val, const scalar* c_val) { a_val[0] = + b_val[0] * c_val[0]; }; +*/ + +template +inline void CPU_tensor_apply2(Tensor tensor1, Tensor tensor2, const Op op) { + if (!_apply_preamble({tensor1, tensor2})) + return; + if (_max_dim_tensors({tensor1, tensor2}) <= 8) { + apply_op( + tensor1.numel(), + 0, + op, + strided_tensor_iter_fixed(tensor1), + strided_tensor_iter_fixed(tensor2)); + } else { + apply_op( + tensor1.numel(), + 0, + op, + strided_tensor_iter(tensor1), + strided_tensor_iter(tensor2)); + } +} + +template +inline void CPU_tensor_apply3( + Tensor tensor1, + Tensor tensor2, + Tensor tensor3, + const Op op) { + if (!_apply_preamble({tensor1, tensor2, tensor3})) + return; + if (_max_dim_tensors({tensor1, tensor2, tensor3}) <= 8) { + apply_op( + tensor1.numel(), + 0, + op, + strided_tensor_iter_fixed(tensor1), + strided_tensor_iter_fixed(tensor2), + strided_tensor_iter_fixed(tensor3)); + } else { + apply_op( + tensor1.numel(), + 0, + op, + strided_tensor_iter(tensor1), + strided_tensor_iter(tensor2), + strided_tensor_iter(tensor3)); + } +} + +template < + typename scalar1, + typename scalar2, + typename scalar3, + typename scalar4, + typename Op> +inline void CPU_tensor_apply4( + Tensor tensor1, + Tensor tensor2, + Tensor tensor3, + Tensor tensor4, + const Op op) { + if (!_apply_preamble({tensor1, tensor2, tensor3, tensor4})) + return; + if (_max_dim_tensors({tensor1, tensor2, tensor3, tensor4}) <= 8) { + apply_op( + tensor1.numel(), + 0, + op, + strided_tensor_iter_fixed(tensor1), + strided_tensor_iter_fixed(tensor2), + strided_tensor_iter_fixed(tensor3), + strided_tensor_iter_fixed(tensor4)); + } else { + apply_op( + tensor1.numel(), + 0, + op, + strided_tensor_iter(tensor1), + strided_tensor_iter(tensor2), + strided_tensor_iter(tensor3), + strided_tensor_iter(tensor4)); + } +} + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h b/venv/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..cf621f34cc63735d7f7557f48146bb76467b8afc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h @@ -0,0 +1,33 @@ +#pragma once + +#include +#include + +// This file creates a fake allocator that just throws exceptions if +// it is actually used. 
+ +// state passed to the allocator is the std::function called +// when the blob is release by ATen + +namespace at { + +static cpu_fixed_malloc(void*, ptrdiff_t) { + AT_ERROR("attempting to resize a tensor view of an external blob"); +} + +static cpu_fixed_realloc(void*, void*, ptrdiff_t) { + AT_ERROR("attempting to resize a tensor view of an external blob"); +} + +static cpu_fixed_free(void* state, void* allocation) { + auto on_release = static_cast*>(state); + (*on_release)(allocation); + delete on_release; +} + +static Allocator CPU_fixed_allocator = { + cpu_fixed_malloc, + cpu_fixed_realloc, + cpu_fixed_free}; + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h b/venv/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..10223723e47fda0105448b72b6f081b92d85f5bc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. +// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. 
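// Illustrative sketch of the split described in the note above
// (paraphrased; bodies are elided and this is not the literal contents
// of either file):
//
//   // CPUFunctions.h -- safe to include from anywhere
//   #include <ATen/core/TensorBody.h>   // finish defining the Tensor class
//   #include <ATen/CPUFunctions_inl.h>  // then pull in the fastpath API
//
//   // CPUFunctions_inl.h -- declarations of the CPU fastpath functions,
//   // which may take Tensor arguments with defaulted optionals.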
+#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..e7d79fc715d6209920c6f3d4a2d02c7d8077b6bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h @@ -0,0 +1,542 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..e68b8871d95bd8a3d013559d0fe79c450ad04c29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h @@ -0,0 +1,323 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && 
defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Config.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Config.h new file mode 100644 index 
0000000000000000000000000000000000000000..b94c4d46dba26adccf67224f3f82de0b9387b284 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Config.h @@ -0,0 +1,22 @@ +#pragma once + +// Test these using #if AT_MKL_ENABLED(), not #ifdef, so that it's +// obvious if you forgot to include Config.h +// c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined +// +// DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h + +#define AT_MKLDNN_ENABLED() 1 +#define AT_MKLDNN_ACL_ENABLED() 0 +#define AT_MKL_ENABLED() 1 +#define AT_MKL_SEQUENTIAL() 0 +#define AT_POCKETFFT_ENABLED() 0 +#define AT_NNPACK_ENABLED() 1 +#define CAFFE2_STATIC_LINK_CUDA() 0 +#define AT_BUILD_WITH_BLAS() 1 +#define AT_BUILD_WITH_LAPACK() 1 +#define AT_PARALLEL_OPENMP 1 +#define AT_PARALLEL_NATIVE 0 +#define AT_PARALLEL_NATIVE_TBB 0 +#define AT_BLAS_F2C() 0 +#define AT_BLAS_USE_CBLAS_DOT() 0 diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h b/venv/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h new file mode 100644 index 0000000000000000000000000000000000000000..c3e800c7e07c65c4289baa46ba29d9b61cc5dd20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +#include +#include + +// This file defines the top level Accelerator concept for PyTorch. +// A device is an accelerator per the definition here if: +// - It is mutually exclusive with all other accelerators +// - It performs asynchronous compute via a Stream/Event system +// - It provides a set of common APIs as defined by AcceleratorHooksInterface +// +// As of today, accelerator devices are (in no particular order): +// CUDA, MTIA, PrivateUse1 +// We want to add once all the proper APIs are supported and tested: +// HIP, MPS, XPU + +namespace at { + +// Ensures that only one accelerator is available (at +// compile time if possible) and return it. +// When checked is true, the returned optional always has a value. +TORCH_API std::optional getAccelerator(bool checked = false); + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h b/venv/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..adc7f3efdbb6a0e7d12fd6fcd0117089a83e8e85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include +#include // TensorList whyyyyy + +namespace at { + +// Are you here because you're wondering why DeviceGuard(tensor) no +// longer works? For code organization reasons, we have temporarily(?) +// removed this constructor from DeviceGuard. The new way to +// spell it is: +// +// OptionalDeviceGuard guard(device_of(tensor)); + +/// Return the Device of a Tensor, if the Tensor is defined. +inline c10::optional device_of(const Tensor& t) { + if (t.defined()) { + return c10::make_optional(t.device()); + } else { + return c10::nullopt; + } +} + +inline c10::optional device_of(const c10::optional& t) { + return t.has_value() ? device_of(t.value()) : c10::nullopt; +} + +/// Return the Device of a TensorList, if the list is non-empty and +/// the first Tensor is defined. (This function implicitly assumes +/// that all tensors in the list have the same device.) 
+inline c10::optional device_of(ITensorListRef t) { + if (!t.empty()) { + return device_of(t.front()); + } else { + return c10::nullopt; + } +} + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/DimVector.h b/venv/lib/python3.10/site-packages/torch/include/ATen/DimVector.h new file mode 100644 index 0000000000000000000000000000000000000000..cb652fffcb14819d8ca5292daa012ad47f4c3fad --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/DimVector.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Dimname.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Dimname.h new file mode 100644 index 0000000000000000000000000000000000000000..71836a9e25d3d82d9cd5024b2f33e147e14bf87e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Dimname.h @@ -0,0 +1 @@ +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/DynamicLibrary.h b/venv/lib/python3.10/site-packages/torch/include/ATen/DynamicLibrary.h new file mode 100644 index 0000000000000000000000000000000000000000..523a21985f225eb72ac23c562e990fc105bd1ed4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/DynamicLibrary.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +class DynamicLibraryError : public Error { + using Error::Error; +}; + +} // namespace c10 + +namespace at { + +struct DynamicLibrary { + AT_DISALLOW_COPY_AND_ASSIGN(DynamicLibrary); + + TORCH_API DynamicLibrary( + const char* name, + const char* alt_name = nullptr, + bool leak_handle = false); + + TORCH_API void* sym(const char* name); + + TORCH_API ~DynamicLibrary(); + + private: + bool leak_handle; + void* handle = nullptr; +}; + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..5f8681ce37f960b953e6d8dcc50c657c69f1c536 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h @@ -0,0 +1,160 @@ +#pragma once +#include + +namespace at::detail { + +inline void check_size_nonnegative(ArrayRef size) { + for (const auto& x : size) { + TORCH_CHECK( + x >= 0, + "Trying to create tensor with negative dimension ", + x, + ": ", + size); + } +} + +inline void check_size_nonnegative(ArrayRef size) { + for (const auto& x : size) { + TORCH_CHECK( + x.expect_size(__FILE__, __LINE__), + "Trying to create tensor with negative dimension ", + x, + ": ", + size); + } +} + +TORCH_API size_t computeStorageNbytesContiguous( + IntArrayRef sizes, + size_t itemsize, + size_t storage_offset = 0); +TORCH_API SymInt computeStorageNbytesContiguous( + SymIntArrayRef sizes, + const SymInt& itemsize, + const SymInt& storage_offset = 0); +TORCH_API size_t computeStorageNbytes( + IntArrayRef sizes, + IntArrayRef strides, + size_t itemsize, + size_t storage_offset = 0); +TORCH_API SymInt computeStorageNbytes( + SymIntArrayRef sizes, + SymIntArrayRef strides, + const SymInt& itemsize, + const SymInt& storage_offset = 0); + +TORCH_API TensorBase empty_generic( + IntArrayRef size, + c10::Allocator* allocator, + c10::DispatchKeySet ks, + ScalarType scalar_type, + c10::optional memory_format_opt); + +TORCH_API TensorBase empty_strided_generic( + IntArrayRef size, + IntArrayRef stride, + c10::Allocator* allocator, + c10::DispatchKeySet ks, + ScalarType scalar_type); + +TORCH_API TensorBase 
empty_strided_symint_generic( + SymIntArrayRef size, + SymIntArrayRef stride, + c10::Allocator* allocator, + c10::DispatchKeySet ks, + ScalarType scalar_type); + +TORCH_API TensorBase empty_cpu( + IntArrayRef size, + ScalarType dtype, + bool pin_memory = false, + c10::optional memory_format_opt = c10::nullopt); + +TORCH_API TensorBase empty_cpu( + IntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); + +TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options); + +TORCH_API TensorBase empty_strided_cpu( + IntArrayRef size, + IntArrayRef stride, + ScalarType dtype, + bool pin_memory = false); + +TORCH_API TensorBase empty_strided_cpu( + IntArrayRef size, + IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt); + +TORCH_API TensorBase empty_strided_cpu( + IntArrayRef size, + IntArrayRef stride, + const TensorOptions& options); + +TORCH_API TensorBase empty_meta( + IntArrayRef size, + ScalarType dtype, + c10::optional memory_format_opt = c10::nullopt); + +TORCH_API TensorBase empty_meta( + IntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); + +TORCH_API TensorBase empty_symint_meta( + SymIntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); + +TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options); + +TORCH_API TensorBase +empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype); + +TORCH_API TensorBase empty_strided_meta( + IntArrayRef size, + IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt); + +TORCH_API TensorBase empty_strided_meta( + IntArrayRef size, + IntArrayRef stride, + const TensorOptions& options); + +TORCH_API TensorBase empty_strided_symint_meta( + SymIntArrayRef size, + SymIntArrayRef stride, + ScalarType dtype); + +TORCH_API TensorBase empty_strided_symint_meta( + SymIntArrayRef size, + SymIntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt); + +TORCH_API TensorBase empty_strided_symint_meta( + SymIntArrayRef size, + SymIntArrayRef stride, + const TensorOptions& options); + +} // namespace at::detail diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..82db1f8b6517cb9253dff2f25b13ab8a98d4bfd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h @@ -0,0 +1,527 @@ +#pragma once + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace at { + +TORCH_API std::vector infer_size(IntArrayRef a, IntArrayRef b); +TORCH_API std::vector infer_size_symint( + SymIntArrayRef a, + SymIntArrayRef b); +TORCH_API DimVector infer_size_dimvector(IntArrayRef a, IntArrayRef b); +TORCH_API SymDimVector +infer_size_symdimvector(SymIntArrayRef a, SymIntArrayRef b); + +// Named type instead of a pair/tuple so that we can be sure to 
+// construct the vectors in place and get NRVO. +template +struct InferExpandGeometryResult { + Container sizes; + Container strides; + explicit InferExpandGeometryResult(size_t ndim) + : sizes(ndim), strides(ndim) {} + explicit InferExpandGeometryResult(IntArrayRef sizes_, size_t ndim) + : sizes(sizes_.begin(), sizes_.end()), strides(ndim) {} +}; + +TORCH_API std::tuple, std::vector> +inferExpandGeometry( + IntArrayRef tensor_sizes, + IntArrayRef tensor_strides, + IntArrayRef sizes); + +TORCH_API InferExpandGeometryResult inferExpandGeometry_dimvector( + IntArrayRef tensor_sizes, + IntArrayRef tensor_strides, + IntArrayRef sizes); + +TORCH_API std::vector infer_dense_strides( + IntArrayRef tensor_sizes, + IntArrayRef tensor_strides); + +// True if input shapes are expandable +// NOTE: infer_size did a similar check, please keep them sync if change is +// needed +inline bool are_expandable(IntArrayRef shape1, IntArrayRef shape2) { + size_t ndim1 = shape1.size(); + size_t ndim2 = shape2.size(); + size_t ndim = ndim1 < ndim2 ? ndim1 : ndim2; + + for (int64_t i = static_cast(ndim) - 1; i >= 0; --i) { + if (shape1[--ndim1] == shape2[--ndim2] || shape1[ndim1] == 1 || + shape2[ndim2] == 1) { + continue; + } + return false; + } + return true; +} + +// avoid copy-construction of Tensor by using a reference_wrapper. +inline void check_defined( + std::initializer_list> tensors, + const char* api_name) { + for (auto& t : tensors) { + if (!t.get().defined()) { + AT_ERROR(api_name, "(...) called with an undefined Tensor"); + } + } +} + +// NOTE [ ExpandUtils Borrowing ] +// +// Functions in ExpandUtils return `c10::MaybeOwned` because +// expansion may not actually be needed, in which case we can improve +// efficiency by returning +// `c10::MaybeOwned::borrowed(to_expand)`. However, this means +// that you need to be careful: the returned `c10::MaybeOwned` +// must not outlive the original `Tensor` object that `to_expand` +// referred to! The deleted rvalue reference overloads of these +// functions help with this by preventing trivial use of a temporary +// resulting from a function call, but it is still possible to make a +// mistake. 
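// Illustrative usage sketch of the borrowing contract described in the
// note above (added for clarity; `a` and `b` stand for any two defined,
// broadcast-compatible tensors and are not upstream code):
//
//   at::Tensor a = ..., b = ...;
//   c10::MaybeOwned<at::Tensor> b_exp = at::expand_inplace(a, b);
//   a.add_(*b_exp);   // fine: `b` is still alive at this point
//   // Do NOT keep b_exp somewhere that outlives `b`: if the borrowed
//   // path was taken, dereferencing it later is a dangling access.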
+ +inline c10::MaybeOwned expand_inplace( + const Tensor& tensor, + const Tensor& to_expand) { + if (tensor.sym_sizes().equals(to_expand.sym_sizes())) { + return c10::MaybeOwned::borrowed(to_expand); + } + return c10::MaybeOwned::owned( + to_expand.expand_symint(tensor.sym_sizes())); +} + +inline c10::MaybeOwned expand_inplace( + const Tensor& tensor, + Tensor&& to_expand) = delete; + +inline c10::MaybeOwned expand_inplace( + const Tensor& tensor, + const Tensor& to_expand, + const char* api_name) { + check_defined({tensor, to_expand}, api_name); + return expand_inplace(tensor, to_expand); +} + +inline c10::MaybeOwned expand_inplace( + const Tensor& tensor, + Tensor&& to_expand, + const char* api_name) = delete; + +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + const Tensor& to_expand1, + const Tensor& to_expand2) { + if (tensor.sizes().equals(to_expand1.sizes()) && + tensor.sizes().equals((to_expand2.sizes()))) { + return std::make_tuple( + c10::MaybeOwned::borrowed(to_expand1), + c10::MaybeOwned::borrowed(to_expand2)); + } + + return std::make_tuple( + c10::MaybeOwned::owned(to_expand1.expand(tensor.sizes())), + c10::MaybeOwned::owned(to_expand2.expand(tensor.sizes()))); +} + +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + Tensor&& to_expand1, + const Tensor& to_expand2) = delete; +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + const Tensor& to_expand1, + Tensor&& to_expand2) = delete; +inline std::tuple, c10::MaybeOwned> +expand_inplace(const Tensor& tensor, Tensor&& to_expand1, Tensor&& to_expand2) = + delete; + +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + const Tensor& to_expand1, + const Tensor& to_expand2, + const char* api_name) { + check_defined({tensor, to_expand1, to_expand2}, api_name); + return expand_inplace(tensor, to_expand1, to_expand2); +} + +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + Tensor&& to_expand1, + const Tensor& to_expand2, + const char* api_name) = delete; +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + const Tensor& to_expand1, + Tensor&& to_expand2, + const char* api_name) = delete; +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + Tensor&& to_expand1, + Tensor&& to_expand2, + const char* api_name) = delete; + +// See NOTE [ ExpandUtils Borrowing ] above for `MaybeOwned` explanation. 
+inline std::tuple, c10::MaybeOwned> +expand_outplace(const Tensor& to_expand1, const Tensor& to_expand2) { + auto s1 = to_expand1.sym_sizes(); + auto s2 = to_expand2.sym_sizes(); + if (s1.equals(s2)) { + return std::make_tuple( + c10::MaybeOwned::borrowed(to_expand1), + c10::MaybeOwned::borrowed(to_expand2)); + } + + auto expanded_size = infer_size_symdimvector(s1, s2); + return std::make_tuple( + c10::MaybeOwned::owned(to_expand1.expand_symint(expanded_size)), + c10::MaybeOwned::owned(to_expand2.expand_symint(expanded_size))); +} + +inline std::tuple, c10::MaybeOwned> +expand_outplace(Tensor&& to_expand1, const Tensor& to_expand2) = delete; +inline std::tuple, c10::MaybeOwned> +expand_outplace(const Tensor& to_expand1, Tensor&& to_expand2) = delete; +inline std::tuple, c10::MaybeOwned> +expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2) = delete; + +inline std::tuple, c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + const char* api_name) { + check_defined({to_expand1, to_expand2}, api_name); + return expand_outplace(to_expand1, to_expand2); +} + +inline std::tuple, c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + const char* api_name) = delete; +inline std::tuple, c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + const char* api_name) = delete; +inline std::tuple, c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + Tensor&& to_expand2, + const char* api_name) = delete; + +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + const Tensor& to_expand3) { + if (to_expand1.sizes().equals(to_expand2.sizes()) && + to_expand1.sizes().equals(to_expand3.sizes())) { + return std::make_tuple( + c10::MaybeOwned::borrowed(to_expand1), + c10::MaybeOwned::borrowed(to_expand2), + c10::MaybeOwned::borrowed(to_expand3)); + } + + auto expanded_size12 = + infer_size_dimvector(to_expand1.sizes(), to_expand2.sizes()); + auto expanded_size = + infer_size_dimvector(expanded_size12, to_expand3.sizes()); + return std::make_tuple( + c10::MaybeOwned::owned(to_expand1.expand(expanded_size)), + c10::MaybeOwned::owned(to_expand2.expand(expanded_size)), + c10::MaybeOwned::owned(to_expand3.expand(expanded_size))); +} + +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + const Tensor& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + const Tensor& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + Tensor&& to_expand2, + const Tensor& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + Tensor&& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + Tensor&& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + Tensor&& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace(Tensor&& 
to_expand1, Tensor&& to_expand2, Tensor&& to_expand3) = + delete; + +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + const Tensor& to_expand3, + const char* api_name) { + check_defined({to_expand1, to_expand2, to_expand3}, api_name); + return expand_outplace(to_expand1, to_expand2, to_expand3); +} + +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + const Tensor& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + const Tensor& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + Tensor&& to_expand2, + const Tensor& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + Tensor&& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + Tensor&& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + Tensor&& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + Tensor&& to_expand2, + Tensor&& to_expand3, + const char* api_name) = delete; + +inline c10::MaybeOwned expand_size( + const Tensor& to_expand, + IntArrayRef sizes) { + if (to_expand.sizes().equals(sizes)) { + return c10::MaybeOwned::borrowed(to_expand); + } + + return c10::MaybeOwned::owned(to_expand.expand(sizes)); +} + +inline c10::MaybeOwned expand_size( + Tensor&& to_expand, + IntArrayRef sizes) = delete; + +inline c10::MaybeOwned expand_size( + const Tensor& to_expand, + IntArrayRef sizes, + const char* api_name) { + check_defined({to_expand}, api_name); + return expand_size(to_expand, sizes); +} + +inline c10::MaybeOwned expand_size( + Tensor&& to_expand, + IntArrayRef sizes, + const char* api_name) = delete; + +inline std::vector expand_outplace(TensorList to_expand) { + // expands a list of Tensors; ignores undefined (null) tensors + bool first = true; + DimVector sizes; + for (const auto i : c10::irange(to_expand.size())) { + if (!to_expand[i].defined()) { + continue; + } else if (first) { + sizes = to_expand[i].sizes(); + first = false; + } else { + sizes = infer_size_dimvector(sizes, to_expand[i].sizes()); + } + } + + std::vector result(to_expand.size()); + for (const auto i : c10::irange(to_expand.size())) { + if (!to_expand[i].defined()) { + continue; + } else if (to_expand[i].sizes().equals(sizes)) { + result[i] = to_expand[i]; + } else { + result[i] = to_expand[i].expand(sizes); + } + } + return result; +} + +template +inline Tensor _sum_to( + Tensor tensor, + const c10::ArrayRef shape, + bool always_return_non_view = false) { + if (shape.size() == 0) { + return tensor.sum(); + } + + auto sizes = at::symint::sizes(tensor); + c10::SmallVector reduce_dims; + const int64_t leading_dims = sizes.size() - shape.size(); + for (const auto i : 
c10::irange(leading_dims)) { + reduce_dims.push_back(i); + } + for (int64_t i = leading_dims; i < static_cast(sizes.size()); ++i) { + if (shape[i - leading_dims] == 1 && sizes[i] != 1) { + reduce_dims.push_back(i); + } + } + + if (!reduce_dims.empty()) { + tensor = tensor.sum(reduce_dims, /*keepdim=*/true); + } + + if (always_return_non_view) { + // This is only actually used by the functionalization pass. + // We want to be able to guarantee that this function doesn't return a view + // of the input. + return leading_dims > 0 ? at::symint::view_copy(tensor, shape) + : tensor.clone(); + } else { + return leading_dims > 0 ? at::symint::view(tensor, shape) : tensor; + } +} + +inline Tensor sum_to( + Tensor tensor, + const c10::SymIntArrayRef shape, + bool always_return_non_view = false) { + return _sum_to(std::move(tensor), shape, always_return_non_view); +} + +// Sums `tensor` repeatedly to produce a tensor of shape `shape`. +// Precondition: is_expandable_to(shape, tensor.sizes()) must be true +inline Tensor sum_to( + Tensor tensor, + const IntArrayRef shape, + bool always_return_non_view = false) { + return _sum_to(std::move(tensor), shape, always_return_non_view); +} + +static inline bool is_expandable_to( + SymIntArrayRef shape, + c10::SymIntArrayRef desired) { + size_t ndim = shape.size(); + size_t target_dim = desired.size(); + if (ndim > target_dim) { + return false; + } + for (const auto i : c10::irange(ndim)) { + const auto& size = shape[ndim - i - 1]; + const auto& target = desired[target_dim - i - 1]; + if (size != target && size != 1) { + return false; + } + } + return true; +} + +static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) { + auto sym_shape = c10::SymIntArrayRef( + reinterpret_cast(shape.data()), shape.size()); + auto sym_desired = c10::SymIntArrayRef( + reinterpret_cast(desired.data()), desired.size()); + return is_expandable_to(sym_shape, sym_desired); +} + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Formatting.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Formatting.h new file mode 100644 index 0000000000000000000000000000000000000000..392e2a27b0130c7ba55621d6ac1d6fd4e989db02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Formatting.h @@ -0,0 +1 @@ +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/FunctionalTensorWrapper.h b/venv/lib/python3.10/site-packages/torch/include/ATen/FunctionalTensorWrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..6291b2743459625144e7411b0ebf2c4f0bfaa217 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/FunctionalTensorWrapper.h @@ -0,0 +1,408 @@ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace at { + +// Note [Functionalization Pass In Core] +// The Functionalization pass is used to remove aliasing from a pytorch program. +// +// This is useful for backends that don't support aliasing, like XLA and Vulkan. +// It's also necessary in order to remove mutation from a program, which is +// needed in Functorch. +// +// Consider this program: +// a = torch.ones(...) +// b = a.view(...) +// b.add_(1) +// +// In this program, b is meant to alias with a due to the use of view(). At the +// end of the program, both a and b are full of 2's. However, backends that +// don't support aliasing aren't able to correctly implement the view() +// operator. 
Instead, they can opt into the Functionalization pass, which will +// sit between the user and the backend, and provide the necessary aliasing +// logic. +// +// The functionalization pass will turn the above program into a slightly +// different program that has the same semantics, transparently to the user, +// that backends like XLA/Vulkan are able to implement a = torch.ones(...) b = +// a.view_copy(...) # view() replaced with view_copy(). Backends like +// XLA/Vulkan can implement this! b.add_(1) a.add_(1) # Our functionalization +// pass machinery knows that a and b are aliased - it applies b's mutation to a +// too. +// +// So, how does the functionalization pass keep track of which tensors are +// aliased? The pass works by wrapping EVERY tensor in the program inside of a +// FunctionalTensorWrapper, which knows about its alias'd tensors. +// +// See Note [Functionalization: Alias Removal] for details on the aliasing +// machinery. See Note [Functionalization: Mutation Removal] for details on +// mutation removal. +struct TORCH_API FunctionalTensorWrapper : public c10::TensorImpl { + explicit FunctionalTensorWrapper(const Tensor& value); + // Additional constructor to create a FunctionalTensorWrapper directly from an + // underlying tensor that was created from a view. For example, the code b = + // a.view1() will generate a constructor call to FunctionalTensorWrapper(b, a, + // view1_meta) + explicit FunctionalTensorWrapper( + const Tensor& view_value, + const FunctionalTensorWrapper* base, + const functionalization::ViewMeta& meta); + + // Get the underlying, actual tensor, that doesn't know anything about + // functionalization. + const Tensor& value() const { + return value_; + }; + // The concept of "level" is only ever important to functorch; it's exposed + // here as more of a hook for functorch to use. + int64_t level() const { + return level_; + }; + void set_level(int64_t level) { + level_ = level; + } + bool has_metadata_mutation() const { + return has_metadata_mutation_; + }; + + // Denotes a mutation that's hidden from autograd, + // e.g. for the purposes of passing a tensor to a triton kernel + void mark_mutation_hidden_from_autograd() { + mutation_hidden_from_autograd_counter_++; + } + void mark_mutation_during_no_grad_or_inference_mode() { + mutation_during_no_grad_or_inference_mode_++; + } + // Are all the mutations happening to the tensor hidden from autograd + bool are_all_mutations_hidden_from_autograd() const { + return mutation_hidden_from_autograd_counter_ == mutation_counter_; + } + // Did all mutations happen under no_grad or inference_mode + // (We also need to ignore mutations fully hidden from autograd here) + bool are_all_mutations_under_no_grad_or_inference_mode() const { + return mutation_hidden_from_autograd_counter_ + + mutation_during_no_grad_or_inference_mode_ == + mutation_counter_; + } + + // Sync's the underlying tensor with its alias, if it's out of date. This + // involves two steps: 1) Apply any pending updates/mutations to the alias 2) + // Replay the views (if any) to regenerate the current tensor off of the + // updated alias. + void sync_(); + // Performs step (1) of the sync. This is its own public API because it's + // needed by view_inplace ops like transpose_. See Note [Functionalization + // Pass - Inplace View Ops] + void regenerate_from_base(); + // Performs step (2) of the sync. This is its own public API because it's + // needed by functorch. 
functorch wants to make sure that all input tensors to + // a functionalized program have been properly synced so it can properly + // propagate mutations to inputs. It can't just call sync_(), because the + // FunctionalTensorWrapper will look like it has no aliases and sync_ will be + // a noop. We use the reference count on storage_ to determine if the wrapper + // is aliased, and by the time functorch is ready to propagate updates to + // inputs, any intermediate views of the input created by the program will + // have been deallocated. This function also returns whether or not the base + // actually had any updates to apply. + bool apply_updates(); + // Takes the current state of value_ and snapshots it, sending it as a pending + // update to the alias. + void commit_update(); + // When any tensor is mutated, the tensor increments its alias's "generation". + // Separately, each tensor maintains its own "generation" counter, which is + // used to determine if it's up-to-date with its alias. The act of syncing a + // tensor will set a tensor's generation equal to its alias's generation. + bool is_up_to_date() const; + // Freezes the storage of this tensor, preventing subsequent mutations + void freeze_storage() const; + // Every FunctionalTensorWrapper contains a vector objects + // describing the series of view ops that ran to generate the current tensor + // from the base tensor. This method is used by inplace-view ops like + // transpose_. It appends a ViewMeta to the existing stack, and refreshes the + // tensor by replaying the views off of the alias. + void mutate_view_meta(const at::functionalization::ViewMeta& meta); + + // Custom implementation of self.set_(src) + void set__impl(const FunctionalTensorWrapper* other); + + // Returns whether the current tensor's data was ever mutated + bool has_data_mutation(); + // + // Returns whether the current FunctionalTensorWrapper + // experienced a set_() call. + bool was_storage_changed() { + return was_storage_changed_; + } + + // The functionalization pass can be used to remove mutations. + // It does so by replacing any mutation op with it's corresponding + // out-of-place op, followed by a call to replace_(). e.g: + // + // a.add_(1) + // + // will turn into: + // + // tmp = a.add(1) + // a.replace_(tmp) + // + // replace_() swaps out the wrapped tensor, value_, with tmp. + void replace_(const Tensor& other); + + bool is_multi_output_view() { + return is_multi_output_view_; + } + + // See Note[resize_() in functionalization pass] + void maybe_replace_storage(const Tensor& other); + + // Replaces the storage with a new functional storage, + // and clears the view_metas_ stack. + // WARNING: Calling this function will sever the aliasing relationship between + // the current FunctionalTensorWrapper and any of its outstanding aliases. + // Please only call if you know what you're doing. + void _unsafe_reset_storage(); + + c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override; + + c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override; + + ~FunctionalTensorWrapper() override = default; + + // FunctionalTensorWrapper overrides all custom size/stride function, + // so that if the inner tensor has a custom implementation + // we make sure to call that implementation. 
+ at::IntArrayRef sizes_custom() const override; + at::IntArrayRef strides_custom() const override; + int64_t dim_custom() const override; + int64_t numel_custom() const override; + bool is_contiguous_custom(at::MemoryFormat memory_format) const override; + c10::SymIntArrayRef sym_sizes_custom() const override; + c10::SymInt sym_size_custom(int64_t d) const override; + c10::SymIntArrayRef sym_strides_custom() const override; + c10::SymInt sym_storage_offset_custom() const override; + c10::Device device_custom() const override; + + private: + const char* tensorimpl_type_name() const override; + void set_constructor_metadata(); + functionalization::FunctionalStorageImpl* functional_storage_impl() const; + + // This is used to re-implement shallow_copy_and_detach for + // FunctionalTensorWrapper. The implementation is identical, but we just need + // to return a subclass instead of a plain TensorImpl. + // TODO: maybe it's possible to arrange for that to happen automatically + // without an override here? + template + c10::intrusive_ptr shallow_copy_and_detach_core( + VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const; + + void shallow_copy_from(const c10::intrusive_ptr& impl) override; + void copy_tensor_metadata_and_refresh( + const FunctionalTensorWrapper* src_impl, + FunctionalTensorWrapper* dest_impl, + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const; + + // Note that value is not taken by reference: internally, the wrapper will + // change the value tensor that it points to over time. + Tensor value_; + int64_t level_{}; + // These two counters are used for identifying + // whether all the mutations on a given tensor are hidden from autograd or + // not. If we have an input mutation that is hidden from autograd, then once + // we convert the input mutation to a copy_() we know it will be safe to hide + // the copy_() from autograd as well. + uint64_t mutation_counter_ = 0; + uint64_t mutation_hidden_from_autograd_counter_ = 0; + uint64_t mutation_during_no_grad_or_inference_mode_ = 0; + bool has_metadata_mutation_ = false; + bool is_multi_output_view_ = false; + // Did the tensor experience a set_() call. + bool was_storage_changed_ = false; + + size_t generation_ = 0; + std::vector view_metas_; + + protected: + static void copy_tensor_metadata( + const FunctionalTensorWrapper* src_impl, + FunctionalTensorWrapper* dest_impl, + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change); +}; + +// Utility functions for the functionalization pass. 
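+//
+// Illustrative sketch (an assumption, not part of the original header): a
+// typical consumer of these helpers, e.g. backend glue code that needs the
+// plain tensor behind a functionalized input, is assumed to look roughly like
+//
+//   if (at::functionalization::impl::isFunctionalTensor(t)) {
+//     at::functionalization::impl::sync(t); // apply any pending mutations
+//     at::Tensor inner =
+//         at::functionalization::impl::from_functional_tensor(t);
+//     // ... use `inner`, which no longer carries functionalization state ...
+//   }
+//
+// using the declarations below.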
+ +namespace functionalization { +namespace impl { + +TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper( + const Tensor& tensor) { + auto functional_impl = + static_cast(tensor.unsafeGetTensorImpl()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_impl != nullptr); + return functional_impl; +} + +TORCH_API bool isFunctionalTensor(const at::Tensor& tensor); +TORCH_API bool isFunctionalTensor(const c10::optional& t); +TORCH_API bool isFunctionalTensor( + const c10::List>& t_list); +TORCH_API bool isFunctionalTensor(ITensorListRef list); + +TORCH_API Tensor to_functional_tensor(const Tensor& tensor); +TORCH_API c10::optional to_functional_tensor( + const c10::optional& tensor); +TORCH_API c10::List> to_functional_tensor( + const c10::List>& t_list); +TORCH_API std::vector to_functional_tensor(ITensorListRef t_list); + +TORCH_API void freeze_functional_tensor(const Tensor& tensor); + +TORCH_API Tensor +from_functional_tensor(const Tensor& tensor, bool assert_functional = true); +TORCH_API c10::optional from_functional_tensor( + const c10::optional& t, + bool assert_functional = true); +TORCH_API c10::List> from_functional_tensor( + const c10::List>& t_list); +TORCH_API std::vector from_functional_tensor(ITensorListRef t_list); + +TORCH_API void sync(const at::Tensor& t); +TORCH_API void sync(const c10::optional& t); +TORCH_API void sync(const c10::List>& t_list); +TORCH_API void sync(ITensorListRef t_list); + +TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other); +TORCH_API void replace_( + const ITensorListRef functional_tensor, + ITensorListRef other); + +TORCH_API void commit_update(const Tensor& functional_tensor); +TORCH_API void commit_update(ITensorListRef functional_tensor); + +TORCH_API void unsafe_reset_storage(const Tensor& functional_tensor); + +TORCH_API void mark_mutation_hidden_from_autograd( + const Tensor& functional_tensor); + +TORCH_API bool are_all_mutations_hidden_from_autograd( + const Tensor& functional_tensor); + +TORCH_API bool are_all_mutations_under_no_grad_or_inference_mode( + const Tensor& functional_tensor); + +// These two methods are XLA-specific logic and are no-ops +// for the normal functionalization flow. 
+TORCH_API void propagate_xla_data( + const Tensor& functional_tensor, + const Tensor& other); +TORCH_API void propagate_xla_data( + const ITensorListRef functional_tensor, + ITensorListRef other); + +Tensor create_functional_tensor_with_view_meta( + const Tensor& view_to_wrap, + const Tensor& base, + functionalization::ViewMeta meta, + int64_t out_idx = 0); +std::vector create_functional_tensor_with_view_meta( + ITensorListRef view_to_wrap, + const Tensor& base, + const functionalization::ViewMeta& meta); + +void mutate_view_meta( + const Tensor& self, + const functionalization::ViewMeta& meta); + +void set_sizes_strides_offset(const Tensor& out, const Tensor& meta_out); +void set_sizes_strides_offset( + const std::vector& outs, + const std::vector& meta_outs); + +// ~~~~~ TLS used in functionalization ~~~~~ + +TORCH_API bool getFunctionalizationReapplyViewsTLS(); +TORCH_API void setFunctionalizationReapplyViewsTLS(bool reapply_views); + +class TORCH_API FunctionalizationReapplyViewsGuard { + public: + FunctionalizationReapplyViewsGuard(bool reapply_views) + : prev_(getFunctionalizationReapplyViewsTLS()) { + setFunctionalizationReapplyViewsTLS(reapply_views); + } + + ~FunctionalizationReapplyViewsGuard() { + setFunctionalizationReapplyViewsTLS(prev_); + } + + FunctionalizationReapplyViewsGuard( + const FunctionalizationReapplyViewsGuard&) = delete; + FunctionalizationReapplyViewsGuard operator=( + const FunctionalizationReapplyViewsGuard&) = delete; + FunctionalizationReapplyViewsGuard(FunctionalizationReapplyViewsGuard&&) = + delete; + FunctionalizationReapplyViewsGuard operator=( + FunctionalizationReapplyViewsGuard&&) = delete; + + private: + bool prev_; +}; + +} // namespace impl + +// Helper function to call an out-of-place composite aten kernel that may use +// mutations / views internally, and functionalize them. +TORCH_API void functionalize_op_helper( + const c10::OperatorHandle& op, + torch::jit::Stack* stack); + +template +struct _functionalize_aten_op final {}; + +template +struct _functionalize_aten_op final { + static ReturnType call( + typename c10::maybe_keep_symint::type... args) { + using FuncType = ReturnType( + typename c10::maybe_keep_symint::type...); + auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow( + (const char*)Op::name, (const char*)Op::overload_name) + .typed(); + + return c10::impl::BoxedKernelWrapper::call( + c10::BoxedKernel::makeFromFunction(), + op, + // BoxedKernelWrapper knows to ignore this keyset argument, + // because functionalize_op_helper doesn't take in a DispatchKeySet + c10::DispatchKeySet(), + args...); + } +}; + +template +using functionalize_aten_op = + _functionalize_aten_op; + +template +using functionalize_aten_op_symint = + _functionalize_aten_op; + +} // namespace functionalization +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h b/venv/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..d6914552eb0df70b18077c6ef10a55149790b5d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace at { + +// Represents the initial TensorOptions, before the "defaults" are ever changed. +// This is designed to be used in library code, where the explicit devices, +// dtypes, etc. are known. NOTE: this is not a stable API. 
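+// Illustrative sketch (an assumption, not part of the original header):
+// library code that wants these fixed defaults, independent of any changes to
+// the thread-local/global default dtype or device, might construct a tensor as
+//
+//   at::Tensor t = at::empty({2, 3}, at::initialTensorOptions().dtype(at::kLong));
+//
+// where the explicit dtype() call illustrates that the returned TensorOptions
+// can still be refined by the caller.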
+inline TensorOptions initialTensorOptions() { + return TensorOptions(kCPU).dtype(kFloat).layout(kStrided).requires_grad( + false); +} + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Layout.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Layout.h new file mode 100644 index 0000000000000000000000000000000000000000..ea71e2b469bcf02365c78ebfba1b1d0362b6e531 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Layout.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h b/venv/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..f4a30edef623956d5072737336bfca6da5cb2bb4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h @@ -0,0 +1,139 @@ +#pragma once + +#include +#include + +namespace at { + +enum MappedAllocatorModes { + ALLOCATOR_MAPPED_SHARED = 1, + ALLOCATOR_MAPPED_SHAREDMEM = 2, + ALLOCATOR_MAPPED_EXCLUSIVE = 4, + ALLOCATOR_MAPPED_NOCREATE = 8, + ALLOCATOR_MAPPED_KEEPFD = 16, + ALLOCATOR_MAPPED_FROMFD = 32, + ALLOCATOR_MAPPED_UNLINK = 64 +}; + +// Sentinel value/type to help distinguish the file descriptor constructor from +// the non-file descriptor constructor +enum WithFd { WITH_FD }; + +TORCH_API std::string NewProcessWideShmHandle(); + +class TORCH_API MapAllocator { + public: + MapAllocator(c10::string_view filename, int flags, size_t size); + MapAllocator( + WithFd, + c10::string_view filename, + int fd, + int flags, + size_t size); + MapAllocator(const MapAllocator&) = delete; + MapAllocator& operator=(const MapAllocator&) = delete; + MapAllocator(MapAllocator&&) = delete; + MapAllocator& operator=(MapAllocator&&) = delete; + + const char* filename() const { + return filename_.c_str(); + } + int fd() const { +#ifdef _WIN32 + TORCH_CHECK(false, "MapAllocator::fd() is unsupported on Windows"); +#else + return fd_; +#endif + } + ptrdiff_t size() const { + return size_; + } + // Return a pointer to the actual data for this allocator + // (in the case of the refcounted allocator, this is offset + // from the base pointer.) + virtual void* data() const { + return base_ptr_; + } + + static MapAllocator* fromDataPtr(const at::DataPtr&); + static at::DataPtr makeDataPtr( + c10::string_view filename, + int flags, + size_t size, + size_t* actual_size_out); + static at::DataPtr makeDataPtr( + WithFd, + const char* filename, + int fd, + int flags, + size_t size, + size_t* actual_size_out); + + // Closes the data. Helps us avoid destructor shenanigans + virtual void close(); + + // This is very dangerous. 
You have to redefine this destructor for each + // subclass + virtual ~MapAllocator(); + + protected: + bool closed_ = false; + std::string filename_; + int flags_ = 0; + ptrdiff_t size_; /* mapped size */ +#ifdef _WIN32 + void* handle_; + void* event_; + std::string eventname_; +#else + int fd_ = -1; +#endif + void* base_ptr_ = nullptr; +}; + +// Base-from-member idiom +struct TORCH_API RefcountedMapAllocatorArgCheck { + RefcountedMapAllocatorArgCheck(int flags); +}; + +class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck, + public MapAllocator { + public: + RefcountedMapAllocator(const char* filename, int flags, size_t size); + RefcountedMapAllocator( + WithFd, + const char* filename, + int fd, + int flags, + size_t size); + + static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&); + static at::DataPtr makeDataPtr( + const char* filename, + int flags, + size_t size, + size_t* actual_size_out); + static at::DataPtr makeDataPtr( + WithFd, + const char* filename, + int fd, + int flags, + size_t size, + size_t* actual_size_out); + + void* data() const override; + + void incref(); + int decref(); + void close() override; + + ~RefcountedMapAllocator() override { + RefcountedMapAllocator::close(); + } + + protected: + void checkFlags(); + void initializeAlloc(); +}; + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h b/venv/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h new file mode 100644 index 0000000000000000000000000000000000000000..901efff4cc23fa3d1a4483cb330325431ac95f1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h @@ -0,0 +1,109 @@ +#pragma once +#include +#include + +#include + +namespace at { +/// MatrixRef - Like an ArrayRef, but with an extra recorded strides so that +/// we can easily view it as a multidimensional array. +/// +/// Like ArrayRef, this class does not own the underlying data, it is expected +/// to be used in situations where the data resides in some other buffer. +/// +/// This is intended to be trivially copyable, so it should be passed by +/// value. +/// +/// For now, 2D only (so the copies are actually cheap, without having +/// to write a SmallVector class) and contiguous only (so we can +/// return non-strided ArrayRef on index). +/// +/// P.S. dimension 0 indexes rows, dimension 1 indexes columns +template +class MatrixRef { + public: + typedef size_t size_type; + + private: + /// Underlying ArrayRef + ArrayRef arr; + + /// Stride of dim 0 (outer dimension) + size_type stride0; + + // Stride of dim 1 is assumed to be 1 + + public: + /// Construct an empty Matrixref. + /*implicit*/ MatrixRef() : arr(nullptr), stride0(0) {} + + /// Construct an MatrixRef from an ArrayRef and outer stride. + /*implicit*/ MatrixRef(ArrayRef arr, size_type stride0) + : arr(arr), stride0(stride0) { + TORCH_CHECK( + arr.size() % stride0 == 0, + "MatrixRef: ArrayRef size ", + arr.size(), + " not divisible by stride ", + stride0) + } + + /// @} + /// @name Simple Operations + /// @{ + + /// empty - Check if the matrix is empty. 
+ bool empty() const { + return arr.empty(); + } + + const T* data() const { + return arr.data(); + } + + /// size - Get size a dimension + size_t size(size_t dim) const { + if (dim == 0) { + return arr.size() / stride0; + } else if (dim == 1) { + return stride0; + } else { + TORCH_CHECK( + 0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1"); + } + } + + size_t numel() const { + return arr.size(); + } + + /// equals - Check for element-wise equality. + bool equals(MatrixRef RHS) const { + return stride0 == RHS.stride0 && arr.equals(RHS.arr); + } + + /// @} + /// @name Operator Overloads + /// @{ + ArrayRef operator[](size_t Index) const { + return arr.slice(Index * stride0, stride0); + } + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + std::enable_if_t, MatrixRef>& operator=( + U&& Temporary) = delete; + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + std::enable_if_t, MatrixRef>& operator=( + std::initializer_list) = delete; +}; + +} // end namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h b/venv/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h new file mode 100644 index 0000000000000000000000000000000000000000..d41324249b39bc4f061a9cca62799057ac76ec43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +namespace c10 { +struct TensorImpl; +} + +namespace at { +class TensorBase; + +// MemOverlap: Whether or not there is memory overlap +// +// No: Absolutely no memory overlap +// Yes: Absolutely yes memory overlap +// TooHard: There might be memory overlap, but it was too expensive to compute. +// +// NB: Please update the python test for these if you renumber them. 
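+// Illustrative example (hedged, not part of the original header): a tensor
+// whose sizes/strides make distinct indices touch the same memory, such as one
+// produced by expand() with a stride-0 dimension, is expected to report
+// MemOverlap::Yes from has_internal_overlap(), while a freshly allocated
+// contiguous tensor reports MemOverlap::No:
+//
+//   auto a = at::zeros({3});
+//   auto b = a.expand({4, 3});              // stride 0 along dim 0
+//   auto s1 = at::has_internal_overlap(a);  // MemOverlap::No
+//   auto s2 = at::has_internal_overlap(b);  // MemOverlap::Yes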
+enum class MemOverlap { No, Yes, TooHard }; + +enum class MemOverlapStatus { Full, Partial, No, TooHard }; + +TORCH_API MemOverlap has_internal_overlap(const TensorBase& t); +TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t); + +TORCH_API void assert_no_internal_overlap(const TensorBase& t); +TORCH_API void assert_no_internal_overlap(c10::TensorImpl* t); + +TORCH_API MemOverlapStatus +get_overlap_status(const TensorBase& a, const TensorBase& b); +TORCH_API MemOverlapStatus +get_overlap_status(const c10::TensorImpl* a, const c10::TensorImpl* b); + +TORCH_API void assert_no_partial_overlap( + const TensorBase& a, + const TensorBase& b); +void assert_no_partial_overlap(c10::TensorImpl* a, c10::TensorImpl* b); + +TORCH_API void assert_no_overlap(const TensorBase& a, const TensorBase& b); +TORCH_API void assert_no_overlap(c10::TensorImpl* a, c10::TensorImpl* b); + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h b/venv/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..4e1d24af30086427cb4e7ebebadc4830e5c7ce6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. +// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. 
+#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h b/venv/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h new file mode 100644 index 0000000000000000000000000000000000000000..bae6d022feb430980f670f6efbf9465c8e99350e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h @@ -0,0 +1,443 @@ +#pragma once + +// @generated by torchgen/gen.py from MethodOperators.h + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace _ops { + +} // namespace _ops +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/NamedTensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/NamedTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..a7606b0a668a43800b89755af1371551909b23d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/NamedTensor.h @@ -0,0 +1 @@ +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..0ad42ae816274117a276a9545e2057b5eb252a6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h @@ -0,0 +1,283 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at::native { +struct NestedTensorImpl; +inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt); +int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor); + +struct TORCH_API NestedTensorImpl : public c10::TensorImpl { + explicit NestedTensorImpl( + Storage storage, + c10::DispatchKeySet key_set, + const caffe2::TypeMeta data_type, + at::Tensor nested_sizes, + at::Tensor nested_strides, + at::Tensor storage_offsets); + + explicit NestedTensorImpl( + const at::Tensor& buffer, + at::Tensor nested_sizes, + at::Tensor nested_strides, + at::Tensor storage_offsets); + // assume contiguous, `nested_strides` and `offsets` + // can be infered from `nested_sizes` + explicit NestedTensorImpl( + const at::Tensor& buffer, + const at::Tensor& nested_sizes); + + // This constructor is used creating view tensors from nested tensors + 
explicit NestedTensorImpl( + c10::TensorImpl::ImplType impl_type, + const at::Tensor& base_tensor, + at::Tensor nested_sizes, + at::Tensor nested_strides, + at::Tensor storage_offsets); + + // TODO: don't expose private implementation details like this; in + // particular, resizing this tensor will mess up our dim() and + // callers cannot fix it. + const Tensor& get_nested_sizes() const { + return nested_sizes_; + } + // TODO: don't expose private implementation details like this + const Tensor& get_nested_strides() const { + return nested_strides_; + } + const Tensor& get_storage_offsets() const { + return storage_offsets_; + } + // Returns nullopt if the ith dimension is irregular. The ith dimension + // of a NestedTensor is regular if the unbound tensors match in + // size at the (i-1)th dimension. + c10::optional opt_size(int64_t d) const; + + int64_t size(int64_t d) const { + c10::optional optional_size = this->opt_size(d); + TORCH_CHECK( + optional_size.has_value(), + "Given dimension ", + d, + " is irregular and does not have a size."); + return *optional_size; + } + /** + * Return a view of the nested tensor as a 1 dimensional contiguous tensor. + * + * The buffer tensor created by this function shares the same storage_impl as + * the original nested tensor, and therefore can be seen as a view. + * + * @return A newly constructed view tensor + */ + at::Tensor get_buffer() const { + TORCH_CHECK( + nested_tensor_impl_is_contiguous(this), + "NestedTensor must be contiguous to get buffer."); + return get_unsafe_storage_as_tensor(); + } + /** + * If possible use get_buffer() instead. This function returns the storage + * as a tensor directly, which is not safe to use in general. If using this + * function, The caller must ensure to account for nested_sizes, + * nested_strides and storage_offsets. 
+ * + * @return A newly constructed view tensor + */ + at::Tensor get_unsafe_storage_as_tensor() const { + auto buffer_key_set_ = generate_buffer_key_set(); + const auto buffer_size = get_buffer_size(); + auto buffer_tensor_impl = c10::make_intrusive( + c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_); + buffer_tensor_impl->set_sizes_contiguous( + c10::makeArrayRef(static_cast(buffer_size))); + return Tensor(buffer_tensor_impl); + } + + size_t get_buffer_size() const { + return storage_.nbytes() / data_type_.itemsize(); + } + + protected: + const char* tensorimpl_type_name() const override; + + // TODO: numel_custom and is_contiguous_custom can be profitably overridden + // with real implementations + int64_t numel_custom() const override; + c10::SymInt sym_numel_custom() const override; + bool is_contiguous_custom(MemoryFormat) const override; + int64_t size_custom(int64_t d) const override { + return this->size(d); + } + c10::SymInt sym_size_custom(int64_t d) const override { + return c10::SymInt{this->size(d)}; + } + IntArrayRef sizes_custom() const override; + c10::SymIntArrayRef sym_sizes_custom() const override; + IntArrayRef strides_custom() const override; + c10::SymIntArrayRef sym_strides_custom() const override; + + // this one is real + int64_t dim_custom() const override; + + c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override; + + c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override; + + void shallow_copy_from(const c10::intrusive_ptr& impl) override { + copy_tensor_metadata( + /*src_impl=*/impl.get(), + /*dest_impl=*/this, + /*version_counter=*/version_counter(), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); + } + + private: + // Must be called after any changes to our dim() to sync the state + // to TensorImpl. + void refresh_dim(); + + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const at::Tensor nested_sizes_, nested_strides_; + // The starting positions of the underlying tensors in contiguous buffer + // i.e. the buffer memory offsets to get the underlying tensors + // The reason to keep this metadata is that, without strong enough constraint + // it cannot be derived from `nested_sizes_` + // and `nested_strides_`: + // 1. when buffer has blanks, e.g. [tensor1, blank, tensor2] + // this can happen e.g. after slicing a nested tensor + // 2. when multiple tensors share a same memory + // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2] + // Some strong enough constraints are: + // 1. every underlying tensor is contiguous in memory + // && nesting in ascending order + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const at::Tensor storage_offsets_; + // NOTE: -1 here means the size is missing + // Optional to allow it to be computed lazily from nested. + // TODO: maybe we can remove this metadata since + // we can compute it from `nested_sizes_` + mutable c10::optional> opt_sizes_; + + template + c10::intrusive_ptr shallow_copy_and_detach_core( + VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const; + + /** + * Generates a non-nested key_set from a nested tensor. 
+ * + * For many nested tensor kernel implementations a buffer tensor + * is generated and redispatched to a non-nested kernel this function + * generates the key set used by that buffer tensor + * + * @return Appropriate key set for non-nested tensor + */ + inline c10::DispatchKeySet generate_buffer_key_set() const { + auto buffer_key_set = this->key_set(); + const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset); + // Remove nested tensor specific keys + buffer_key_set = buffer_key_set - + c10::DispatchKeySet{ + c10::DispatchKey::NestedTensor, + c10::DispatchKey::AutogradNestedTensor}; + + // Add dense tensor specific keys + buffer_key_set = + buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense}; + buffer_key_set = Autograd + ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set + : buffer_key_set; + + return buffer_key_set; + } +}; + +inline NestedTensorImpl* get_nested_tensor_impl_or_null( + const at::Tensor& tensor) { + if (tensor.is_nested()) { + return static_cast(tensor.unsafeGetTensorImpl()); + } + return nullptr; +} + +inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) { + TORCH_CHECK( + tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor."); + return static_cast(tensor.unsafeGetTensorImpl()); +} + +inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) { + int64_t ntensors = nt->size(0); + if (ntensors == 0) { + return true; + } + const Tensor &sizemat = nt->get_nested_sizes(), + &stridemat = nt->get_nested_strides(); + int64_t* offsets_ptr = nt->get_storage_offsets().data_ptr(); + int64_t orig_dim = sizemat.size(1); + // nesting scalars + if (orig_dim == 0) { + // each scalar must be contiguous + // if there is blank memory between underlying scalars + for (int64_t i = 0; i < ntensors; i++) { + if (offsets_ptr[i] != i) { + return false; + } + } + } + // nesting tensors + else { + // if any underlying tensor is non-contiguous + const int64_t *sizemat_ptr = sizemat.data_ptr(), + *stridemat_ptr = stridemat.data_ptr(); + for (int64_t i = 0; i < ntensors; i++) { + if (stridemat_ptr[orig_dim - 1] != 1) { + return false; + } + int64_t product = sizemat_ptr[orig_dim - 1]; + for (int64_t j = orig_dim - 2; j >= 0; j--) { + if (stridemat_ptr[j] != product) { + return false; + } + product *= sizemat_ptr[j]; + } + sizemat_ptr += orig_dim; + stridemat_ptr += orig_dim; + } + // if there is blank memory between underlying tensors + if (offsets_ptr[0] != 0) { + return false; + } + sizemat_ptr = sizemat.data_ptr(); + stridemat_ptr = stridemat.data_ptr(); + for (int64_t i = 1; i < ntensors; i++) { + if (offsets_ptr[i] != + offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) { + return false; + } + sizemat_ptr += orig_dim; + stridemat_ptr += orig_dim; + } + } + // everything is fine + return true; +} + +inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) { + return get_nested_tensor_impl(tensor)->get_nested_sizes(); +} + +} // namespace at::native diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h b/venv/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h new file mode 100644 index 0000000000000000000000000000000000000000..d00195b07e490208db6aa9a015bca79b0cc1c83f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +// For FP16 or BFloat16 inputs, ops should perform internal math in 
FP32. +template +struct OpMathType { + using type = scalar_t; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType { + using type = float; +}; +template <> +struct OpMathType> { + using type = c10::complex; +}; + +template +using opmath_type = typename OpMathType::type; + +namespace { + +inline c10::ScalarType toOpMathType(const c10::ScalarType type) { + switch (type) { +#define DEFINE_CASE(scalar_t, TypeNum) \ + case ScalarType::TypeNum: \ + return CppTypeToScalarType>::value; + + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type); + } +} + +} // namespace + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..f71ae5358f29962bec24159ca40c10ec120f6ef1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h @@ -0,0 +1,187 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { + +// An "Opaque" TensorImpl -- there are no strides and (for now) +// even data() is not supported (thus no pointer arithmetic). + +// NOTE: We could allow data() in the future, but would have to ensure pointer +// arithmetic code is properly guarded. +// +// NOTE: This does not support resize_ (and other metadata-changing ops) because +// of `shallow_copy_and_detach`. We would need to define an interface to +// "shallow copy" in order to add support. + +template +struct TORCH_API OpaqueTensorImpl : public TensorImpl { + // public constructor for now... + OpaqueTensorImpl( + at::DispatchKeySet key_set, + const caffe2::TypeMeta data_type, + c10::Device device, + OpaqueHandle opaque_handle, + c10::IntArrayRef sizes, + bool is_non_overlapping_and_dense = true) + : TensorImpl(key_set, data_type, device), + opaque_handle_(std::move(opaque_handle)) { + set_storage_access_should_throw(); + set_custom_sizes_strides(SizesStridesPolicy::CustomStrides); + sizes_and_strides_.set_sizes(sizes); + refresh_numel(); + // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer) + is_non_overlapping_and_dense_ = is_non_overlapping_and_dense; + } + + // Destructor doesn't call release_resources because it's + // unnecessary; don't forget to change that if needed! + void release_resources() override { + TensorImpl::release_resources(); + opaque_handle_ = {}; + } + + void set_size(int64_t dim, int64_t new_size) override { + AT_ERROR("opaque tensors do not have set_size"); + } + + void set_stride(int64_t dim, int64_t new_stride) override { + AT_ERROR("opaque tensors do not have set_stride"); + } + + void set_storage_offset(int64_t storage_offset) override { + AT_ERROR("opaque tensors do not have set_storage_offset"); + } + +#ifdef DEBUG + bool has_storage() const override { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + !storage_, "OpaqueTensorImpl assumes that storage_ is never set"); + return false; + } +#endif + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. 
+ */ + c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override { + auto impl = c10::make_intrusive>( + key_set(), + dtype(), + device(), + opaque_handle_, + sizes_and_strides_.sizes_arrayref()); + copy_tensor_metadata( + /*src_opaque_impl=*/this, + /*dest_opaque_impl=*/impl.get(), + /*version_counter=*/version_counter, + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + impl->refresh_numel(); + return impl; + } + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override { + auto impl = c10::make_intrusive>( + key_set(), + dtype(), + device(), + opaque_handle_, + sizes_and_strides_.sizes_arrayref()); + copy_tensor_metadata( + /*src_opaque_impl=*/this, + /*dest_opaque_impl=*/impl.get(), + /*version_counter=*/std::move(version_counter), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + impl->refresh_numel(); + return impl; + } + + /** + * Shallow-copies data from another TensorImpl into this TensorImpl. + * + * For why this function doesn't check this TensorImpl's + * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ]. + */ + void shallow_copy_from(const c10::intrusive_ptr& impl) override { + AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set())); + auto opaque_impl = + static_cast*>(impl.get()); + copy_tensor_metadata( + /*src_impl=*/opaque_impl, + /*dest_impl=*/this, + /*version_counter=*/version_counter(), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); + refresh_numel(); + } + + const OpaqueHandle& opaque_handle() const { + return opaque_handle_; + } + + OpaqueHandle& unsafe_opaque_handle() { + return opaque_handle_; + } + + protected: + /** + * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / + * storage_offset) from one TensorImpl to another TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE + * [ TensorImpl Shallow-Copying ]. + */ + static void copy_tensor_metadata( + const OpaqueTensorImpl* src_opaque_impl, + OpaqueTensorImpl* dest_opaque_impl, + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) { + TensorImpl::copy_tensor_metadata( + src_opaque_impl, + dest_opaque_impl, + version_counter, + allow_tensor_metadata_change); + + // OpaqueTensorImpl-specific fields. + dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_; + } + + static void copy_tensor_metadata( + const OpaqueTensorImpl* src_opaque_impl, + OpaqueTensorImpl* dest_opaque_impl, + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) { + TensorImpl::copy_tensor_metadata( + src_opaque_impl, + dest_opaque_impl, + std::move(version_counter), + allow_tensor_metadata_change); + + // OpaqueTensorImpl-specific fields. 
+ dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_; + } + + private: + const char* tensorimpl_type_name() const override { + return "OpaqueTensorImpl"; + } + + OpaqueHandle opaque_handle_; +}; + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Operators.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Operators.h new file mode 100644 index 0000000000000000000000000000000000000000..42302ed16e2e4a5019208038bbf39b5ff6210af9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Operators.h @@ -0,0 +1,1358 @@ +#pragma once + +// @generated by torchgen/gen.py from Operators.h + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// Extension writers: do you write wrapper functions? Are you frustrated with
+// resolving overloads of operators? Are you frustrated with dealing with
+// pointer-to-methods and resolving overloads of pointer-to-methods?? Look no
+// further, this is the utility for you.
+//
+// Given an operator schema: aten::op.overload(...
+//
+// Use ATEN_FN2(op, overload) to get a *function* version of the operator
+// that is guaranteed to not be overloaded. This means that you can safely
+// decltype(&ATEN_FN2(op, overload)) it. NB: the 2 means this macro takes 2 args.
+//
+// Given an operator schema without an overload name: aten::op(...
+//
+// Use ATEN_FN(op) to get an unambiguous *function* version of the operator.
+//
+// There is some interesting behavior for out= operations.
+// ATEN_FN2(sin, out) gives a function that is *faithful* to the schema;
+// that is, the order of arguments is exactly what it looks like in the schema.
+
+#define ATEN_FN2(op_name, overload) at::_ops::op_name##_##overload::call
+#define ATEN_FN(op_name) at::_ops::op_name::call
+
+// Separately, ATEN_OP(op) and ATEN_OP2(op, overload) define a class containing compile-time
+// metadata about a given aten operator.
+// Notable data on the class includes:
+// - ATEN_OP2(add, Tensor)::name // returns the string name: "add"
+// - ATEN_OP2(add, Tensor)::overload_name // returns the string overload name: "Tensor"
+// - ATEN_OP2(add, Tensor)::schema // returns the C++ schema type: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
+// - ATEN_OP2(add, Tensor)::schema_str // returns the string jit type: "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
+
+#define ATEN_OP2(op_name, overload) at::_ops::op_name##_##overload
+#define ATEN_OP(op_name) at::_ops::op_name
+
+// WARNING: Please do not call any of the ops in the _ops namespace directly.
+// Use the ATEN_FN macros. We do not guarantee stability of the naming
+// scheme for the functions in at::_ops
+
+// See Note [The ATen Operators API] for details of the at::_ops namespace
+
+namespace at {
+namespace _ops {
+
+} // namespace _ops
+} // namespace at
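The macro documentation above lends itself to a short illustration. The sketch below is editorial (not part of the upstream header); it assumes this hunk is ATen/Operators.h and relies only on the add.Tensor schema quoted in the ATEN_OP2 comment, and the helper name add_via_macro is invented for the example.

    #include <ATen/ATen.h>
    #include <ATen/Operators.h>

    at::Tensor add_via_macro(const at::Tensor& a, const at::Tensor& b) {
      // &at::add would be ambiguous because at::add has several overloads;
      // ATEN_FN2(add, Tensor) expands to at::_ops::add_Tensor::call, a single
      // non-overloaded function, so taking its address is well defined.
      auto add_fn = &ATEN_FN2(add, Tensor);
      // Argument order is faithful to the schema
      // "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor".
      return add_fn(a, b, /*alpha=*/1);
    }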
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Parallel.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Parallel.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff14f568d22a6e0d319bedb4e68194cd0971259e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Parallel.h
@@ -0,0 +1,160 @@
+#pragma once
+#include
+#include
+#include
+#include
+
+namespace at {
+
+inline int64_t divup(int64_t x, int64_t y) {
+  return (x + y - 1) / y;
+}
+
+// Called during new thread initialization
+TORCH_API void init_num_threads();
+
+// Sets the number of threads to be used in parallel region
+TORCH_API void set_num_threads(int);
+
+// Returns the maximum number of threads that may be used in a parallel region
+TORCH_API int get_num_threads();
+
+// Returns the current thread number (starting from 0)
+// in the current parallel region, or 0 in the sequential region
+TORCH_API int get_thread_num();
+
+// Checks whether the code runs in parallel region
+TORCH_API bool in_parallel_region();
+
+namespace internal {
+
+// Initialise num_threads lazily at first parallel call
+inline void lazy_init_num_threads() {
+  thread_local bool init = false;
+  if (C10_UNLIKELY(!init)) {
+    at::init_num_threads();
+    init = true;
+  }
+}
+
+TORCH_API void set_thread_num(int);
+
+class TORCH_API ThreadIdGuard {
+ public:
+  ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) {
+    set_thread_num(new_id);
+  }
+
+  ~ThreadIdGuard() {
+    set_thread_num(old_id_);
+  }
+
+ private:
+  int old_id_;
+};
+
+} // namespace internal
+
+/*
+parallel_for
+
+begin: index at which to start applying user function
+
+end: index at which to stop applying user function
+
+grain_size: number of elements per chunk. impacts the degree of parallelization
+
+f: user function applied in parallel to the chunks, signature:
+  void f(int64_t begin, int64_t end)
+
+Warning: parallel_for does NOT copy thread local
+states from the current thread to the worker threads.
+This means for example that Tensor operations CANNOT be used in the
+body of your function, only data pointers.
+*/
+template <class F>
+inline void parallel_for(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const F& f);
+
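A minimal usage sketch for the parallel_for declared above (editorial, not part of the header; the helper square_all and the grain size are invented for illustration). The lambda touches only raw pointers, as the warning in the comment requires.

    #include <ATen/Parallel.h>
    #include <cstdint>

    // Square each element of `src` into `dst` using intra-op threads.
    // Chunks of roughly `grain_size` elements are handed to worker threads.
    void square_all(const float* src, float* dst, int64_t n) {
      constexpr int64_t grain_size = 2048;  // illustrative tuning value
      at::parallel_for(0, n, grain_size, [&](int64_t begin, int64_t end) {
        for (int64_t i = begin; i < end; ++i) {
          dst[i] = src[i] * src[i];
        }
      });
    }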
+/*
+parallel_reduce
+
+begin: index at which to start applying reduction
+
+end: index at which to stop applying reduction
+
+grain_size: number of elements per chunk. impacts number of elements in
+intermediate results tensor and degree of parallelization.
+
+ident: identity for binary combination function sf. sf(ident, x) needs to return
+x.
+
+f: function for reduction over a chunk. f needs to be of signature scalar_t
+f(int64_t partial_begin, int64_t partial_end, scalar_t identity)
+
+sf: function to combine two partial results. sf needs to be of signature
+scalar_t sf(scalar_t x, scalar_t y)
+
+For example, you might have a tensor of 10000 entries and want to sum together
+all the elements. Parallel_reduce with a grain_size of 2500 will then allocate
+an intermediate result tensor with 4 elements. Then it will execute the function
+"f" you provide and pass the beginning and end index of these chunks, so
+0-2499, 2500-4999, etc. and the combination identity. It will then write out
+the result from each of these chunks into the intermediate result tensor. After
+that it'll reduce the partial results from each chunk into a single number using
+the combination function sf and the identity ident. For a total summation this
+would be "+" and 0 respectively. This is similar to tbb's approach [1], where
+you need to provide a function to accumulate a subrange, a function to combine
+two partial results and an identity.
+
+Warning: parallel_reduce does NOT copy thread local
+states from the current thread to the worker threads.
+This means for example that Tensor operations CANNOT be used in the
+body of your function, only data pointers.
+
+[1] https://software.intel.com/en-us/node/506154
+*/
+template <class scalar_t, class F, class SF>
+inline scalar_t parallel_reduce(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const scalar_t ident,
+    const F& f,
+    const SF& sf);
+
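A matching editorial sketch for parallel_reduce, using the same numbers as the worked example in the comment above (10000 elements, grain_size 2500, "+" as the combiner and 0 as its identity); the helper name sum_all is invented.

    #include <ATen/Parallel.h>
    #include <cstdint>

    float sum_all(const float* data, int64_t n) {
      return at::parallel_reduce(
          0, n, /*grain_size=*/2500, /*ident=*/0.0f,
          // f: reduce one chunk [begin, end), starting from the identity.
          [data](int64_t begin, int64_t end, float partial) {
            for (int64_t i = begin; i < end; ++i) {
              partial += data[i];
            }
            return partial;
          },
          // sf: combine two partial results.
          [](float x, float y) { return x + y; });
    }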
+// Returns a detailed string describing parallelization settings
+TORCH_API std::string get_parallel_info();
+
+// Sets number of threads used for inter-op parallelism
+TORCH_API void set_num_interop_threads(int);
+
+// Returns the number of threads used for inter-op parallelism
+TORCH_API int get_num_interop_threads();
+
+// Launches inter-op parallel task
+TORCH_API void launch(std::function<void()> func);
+namespace internal {
+void launch_no_thread_state(std::function<void()> fn);
+} // namespace internal
+
+// Launches intra-op parallel task
+TORCH_API void intraop_launch(std::function<void()> func);
+
+// Returns number of intra-op threads used by default
+TORCH_API int intraop_default_num_threads();
+
+} // namespace at
+
+#if AT_PARALLEL_OPENMP
+#include <ATen/ParallelOpenMP.h> // IWYU pragma: keep
+#elif AT_PARALLEL_NATIVE
+#include <ATen/ParallelNative.h> // IWYU pragma: keep
+#elif AT_PARALLEL_NATIVE_TBB
+#include <ATen/ParallelNativeTBB.h> // IWYU pragma: keep
+#endif
+
+#include // IWYU pragma: keep
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h
new file mode 100644
index 0000000000000000000000000000000000000000..8df093a99065f3a02490d5ec7747112454b6b44b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include
+#include
+#include
+
+#include
+
+#define INTRA_OP_PARALLEL
+
+namespace at::internal {
+
+TORCH_API void invoke_parallel(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const std::function<void(int64_t, int64_t)>& f);
+
+} // namespace at::internal
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h
new file mode 100644
index 0000000000000000000000000000000000000000..84e744ba10b10af06a234ade767c2a1caa34d9fa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h
@@ -0,0 +1,54 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#ifdef _OPENMP
+#define INTRA_OP_PARALLEL
+
+#include <omp.h>
+#endif
+
+#ifdef _OPENMP
+namespace at::internal {
+template <typename F>
+inline void invoke_parallel(
+    int64_t begin,
+    int64_t end,
+    int64_t grain_size,
+    const F& f) {
+  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
+  std::exception_ptr eptr;
+
+#pragma omp parallel
+  {
+    // choose number of tasks based on grain size and number of threads
+    // can't use num_threads clause due to bugs in GOMP's thread pool (See
+    // #32008)
+    int64_t num_threads = omp_get_num_threads();
+    if (grain_size > 0) {
+      num_threads = std::min(num_threads, divup((end - begin), grain_size));
+    }
+
+    int64_t tid = omp_get_thread_num();
+    int64_t chunk_size = divup((end - begin), num_threads);
+    int64_t begin_tid = begin + tid * chunk_size;
+    if (begin_tid < end) {
+      try {
+        internal::ThreadIdGuard tid_guard(tid);
+        f(begin_tid, std::min(end, chunk_size + begin_tid));
+      } catch (...) {
+        if (!err_flag.test_and_set()) {
+          eptr = std::current_exception();
+        }
+      }
+    }
+  }
+  if (eptr) {
+    std::rethrow_exception(eptr);
+  }
+}
+} // namespace at::internal
+#endif // _OPENMP
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/PythonTorchFunctionTLS.h b/venv/lib/python3.10/site-packages/torch/include/ATen/PythonTorchFunctionTLS.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8efa8eab357b55eb00d997ffe8458b841b0da36
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/PythonTorchFunctionTLS.h
@@ -0,0 +1,34 @@
+#pragma once
+
+#include
+#include
+
+namespace at::impl {
+
+enum TorchFunctionDisabledState { ENABLED, SUBCLASSES_DISABLED, ALL_DISABLED };
+
+struct TORCH_API PythonTorchFunctionTLS {
+  static void set_disabled_state(TorchFunctionDisabledState disabled_state_);
+  static TorchFunctionDisabledState get_disabled_state();
+
+  static void push_onto_stack(std::shared_ptr mode);
+  static const std::shared_ptr pop_stack();
+  static const std::shared_ptr& get_stack_at(int64_t idx);
+  static int64_t stack_len();
+
+  static const PythonTorchFunctionTLS& get_state();
+  static void set_state(const PythonTorchFunctionTLS& state);
+
+ private:
+  // The mode TLS is split into
+  //   - disabled_state, which says which part of torch function are disabled
+  //   - stack_, which is a vector of modes representing the stack of user
+  //   defined modes
+  TorchFunctionDisabledState disabled_state_ =
+      TorchFunctionDisabledState::ENABLED;
+  std::vector> stack_;
+};
+
+TORCH_API bool torch_function_mode_enabled();
+
+} // namespace at::impl
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h b/venv/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h
new file mode 100644
index 0000000000000000000000000000000000000000..5bb358e0e537796aefe5fcfa6afe72de713e0e33
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h
@@ -0,0 +1,3099 @@
+// This file contains all native_functions that can be registered to
+// and the schema string that they should be registered with
+
+Tensor _cast_Byte(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _cast_Char(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _cast_Double(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _cast_Float(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _cast_Int(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _cast_Long(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor
_cast_Short(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Half(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +void _backward(const Tensor & self, TensorList inputs, const c10::optional & gradient, c10::optional retain_graph, bool create_graph); // {"schema": "aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()", "dispatch": "False", "default": "True"} +void set_data(Tensor & self, const Tensor & new_data); // {"schema": "aten::set_data(Tensor(a!) self, Tensor new_data) -> ()", "dispatch": "False", "default": "True"} +Tensor data(const Tensor & self); // {"schema": "aten::data(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +bool is_leaf(const Tensor & self); // {"schema": "aten::is_leaf(Tensor self) -> bool", "dispatch": "False", "default": "True"} +int64_t output_nr(const Tensor & self); // {"schema": "aten::output_nr(Tensor self) -> int", "dispatch": "False", "default": "True"} +int64_t _version(const Tensor & self); // {"schema": "aten::_version(Tensor self) -> int", "dispatch": "False", "default": "True"} +Tensor & requires_grad_(Tensor & self, bool requires_grad); // {"schema": "aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)", "dispatch": "False", "default": "True"} +void retain_grad(Tensor & self); // {"schema": "aten::retain_grad(Tensor(a!) self) -> ()", "dispatch": "False", "default": "True"} +bool retains_grad(const Tensor & self); // {"schema": "aten::retains_grad(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor _fw_primal(const Tensor & self, int64_t level); // {"schema": "aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor _make_dual(const Tensor & primal, const Tensor & tangent, int64_t level); // {"schema": "aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)", "dispatch": "True", "default": "True"} +::std::tuple _unpack_dual(const Tensor & dual, int64_t level); // {"schema": "aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)", "dispatch": "False", "default": "True"} +Tensor _new_zeros_with_same_feature_meta(const Tensor & self, const Tensor & other, int64_t self_num_batch_dims); // {"schema": "aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor", "dispatch": "True", "default": "True"} +bool _has_same_storage_numel(const Tensor & self, const Tensor & other); // {"schema": "aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "True"} +Tensor & rename_(Tensor & self, c10::optional names); // {"schema": "aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor rename(const Tensor & self, c10::optional names); // {"schema": "aten::rename(Tensor(a) self, Dimname[]? 
names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_to(const Tensor & self, DimnameList names); // {"schema": "aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_to(const Tensor & self, DimnameList order, int64_t ellipsis_idx); // {"schema": "aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_as(const Tensor & self, const Tensor & other); // {"schema": "aten::align_as(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector align_tensors(TensorList tensors); // {"schema": "aten::align_tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +void _assert_async(const Tensor & self); // {"schema": "aten::_assert_async(Tensor self) -> ()", "dispatch": "True", "default": "False"} +void _assert_async(const Tensor & self, c10::string_view assert_msg); // {"schema": "aten::_assert_async.msg(Tensor self, str assert_msg) -> ()", "dispatch": "True", "default": "False"} +void _assert_scalar(const Scalar & self, c10::string_view assert_msg); // {"schema": "aten::_assert_scalar(Scalar self, str assert_msg) -> ()", "dispatch": "True", "default": "True"} +Tensor _functional_assert_scalar(const Scalar & self, c10::string_view assert_msg, const Tensor & dep_token); // {"schema": "aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _functional_assert_async(const Tensor & self, c10::string_view assert_msg, const Tensor & dep_token); // {"schema": "aten::_functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor", "dispatch": "True", "default": "False"} +void _assert_tensor_metadata(const Tensor & a, OptionalSymIntArrayRef size, OptionalSymIntArrayRef stride, c10::optional dtype); // {"schema": "aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None) -> ()", "dispatch": "False", "default": "True"} +void _print(c10::string_view s); // {"schema": "aten::_print(str s) -> ()", "dispatch": "True", "default": "True"} +void sym_constrain_range(const Scalar & size, c10::optional min, c10::optional max); // {"schema": "aten::sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> ()", "dispatch": "True", "default": "True"} +void sym_constrain_range_for_size(const Scalar & size, c10::optional min, c10::optional max); // {"schema": "aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()", "dispatch": "True", "default": "True"} +Tensor _functional_sym_constrain_range(const Scalar & size, c10::optional min, c10::optional max, const Tensor & dep_token); // {"schema": "aten::_functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _functional_sym_constrain_range_for_size(const Scalar & size, c10::optional min, c10::optional max, const Tensor & dep_token); // {"schema": "aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _make_dep_token(c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor refine_names(const Tensor & self, DimnameList names); // {"schema": "aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +bool _use_cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank); // {"schema": "aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool", "dispatch": "True", "default": "False"} +bool _use_cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank); // {"schema": "aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool", "dispatch": "True", "default": "False"} +::std::tuple _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity); // {"schema": "aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity); // {"schema": "aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +bool _use_cudnn_rnn_flatten_weight(); // {"schema": "aten::_use_cudnn_rnn_flatten_weight() -> bool", "dispatch": "False", "default": "True"} +Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional); // {"schema": "aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state); // {"schema": "aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? 
dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask); // {"schema": "aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])", "dispatch": "True", "default": "False"} +Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "False"} +int64_t _debug_has_internal_overlap(const Tensor & self); // {"schema": "aten::_debug_has_internal_overlap(Tensor self) -> int", "dispatch": "False", "default": "True"} +::std::tuple _fused_dropout(const Tensor & self, double p, c10::optional generator); // {"schema": "aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale); // {"schema": "aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple native_dropout(const Tensor & input, double p, c10::optional train); // {"schema": "aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor native_dropout_backward(const Tensor & grad_output, const Tensor & mask, double scale); // {"schema": "aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional dtype); // {"schema": "aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated); // {"schema": "aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension); // {"schema": "aten::_sobol_engine_scramble_(Tensor(a!) 
self, Tensor ltm, int dimension) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension); // {"schema": "aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape); // {"schema": "aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _shape_as_tensor(const Tensor & self); // {"schema": "aten::_shape_as_tensor(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor dropout(const Tensor & input, double p, bool train); // {"schema": "aten::dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & dropout_(Tensor & self, double p, bool train); // {"schema": "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor feature_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::feature_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & feature_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor alpha_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & alpha_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor feature_alpha_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor abs(const Tensor & self); // {"schema": "aten::abs(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & abs_(Tensor & self); // {"schema": "aten::abs_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & abs_out(const Tensor & self, Tensor & out); // {"schema": "aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor absolute(const Tensor & self); // {"schema": "aten::absolute(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & absolute_(Tensor & self); // {"schema": "aten::absolute_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & absolute_out(const Tensor & self, Tensor & out); // {"schema": "aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor angle(const Tensor & self); // {"schema": "aten::angle(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & angle_out(const Tensor & self, Tensor & out); // {"schema": "aten::angle.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor view_as_real(const Tensor & self); // {"schema": "aten::view_as_real(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor view_as_complex(const Tensor & self); // {"schema": "aten::view_as_complex(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor sgn(const Tensor & self); // {"schema": "aten::sgn(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sgn_(Tensor & self); // {"schema": "aten::sgn_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sgn_out(const Tensor & self, Tensor & out); // {"schema": "aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor chalf(const Tensor & self, c10::optional memory_format); // {"schema": "aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor real(const Tensor & self); // {"schema": "aten::real(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor imag(const Tensor & self); // {"schema": "aten::imag(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _conj(const Tensor & self); // {"schema": "aten::_conj(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor conj(const Tensor & self); // {"schema": "aten::conj(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _conj_physical(const Tensor & self); // {"schema": "aten::_conj_physical(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor conj_physical(const Tensor & self); // {"schema": "aten::conj_physical(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & conj_physical_out(const Tensor & self, Tensor & out); // {"schema": "aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & conj_physical_(Tensor & self); // {"schema": "aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resolve_conj(const Tensor & self); // {"schema": "aten::resolve_conj(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor resolve_neg(const Tensor & self); // {"schema": "aten::resolve_neg(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _neg_view(const Tensor & self); // {"schema": "aten::_neg_view(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor acos(const Tensor & self); // {"schema": "aten::acos(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & acos_(Tensor & self); // {"schema": "aten::acos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & acos_out(const Tensor & self, Tensor & out); // {"schema": "aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arccos(const Tensor & self); // {"schema": "aten::arccos(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arccos_(Tensor & self); // {"schema": "aten::arccos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arccos_out(const Tensor & self, Tensor & out); // {"schema": "aten::arccos.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad); // {"schema": "aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor add(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & add_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & add_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _add_relu(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _add_relu_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _add_relu_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _add_relu(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _add_relu_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor add(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & add_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmv_(Tensor(a!) 
self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addmv_out(const Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addr_out(const Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor affine_grid_generator(const Tensor & theta, c10::SymIntArrayRef size, bool align_corners); // {"schema": "aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor", "dispatch": "True", "default": "True"} +Tensor affine_grid_generator_backward(const Tensor & grad, c10::SymIntArrayRef size, bool align_corners); // {"schema": "aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _is_all_true(const Tensor & self); // {"schema": "aten::_is_all_true(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _is_any_true(const Tensor & self); // {"schema": "aten::_is_any_true(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_check_tensor(const Tensor & self); // {"schema": "aten::_test_check_tensor(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_functorch_fallback(const Tensor & self, const Tensor & other); // {"schema": "aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor all(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor all(const Tensor & self, OptionalIntArrayRef dim, bool keepdim); // {"schema": "aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & all_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out); // {"schema": "aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & all_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor all(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & all_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & out); // {"schema": "aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +bool allclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan); // {"schema": "aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool", "dispatch": "True", "default": "True"} +Tensor any(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor any(const Tensor & self, OptionalIntArrayRef dim, bool keepdim); // {"schema": "aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & any_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out); // {"schema": "aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & any_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor any(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & any_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & out); // {"schema": "aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor arange(const Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arange(const Scalar & start, const Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arange(const Scalar & start, const Scalar & end, const Scalar & step, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & arange_out(const Scalar & end, Tensor & out); // {"schema": "aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & arange_out(const Scalar & start, const Scalar & end, const Scalar & step, Tensor & out); // {"schema": "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _dim_arange(const Tensor & like, int64_t dim); // {"schema": "aten::_dim_arange(Tensor like, int dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argmax(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & argmax_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & out); // {"schema": "aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor argmin(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & argmin_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & out); // {"schema": "aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor acosh(const Tensor & self); // {"schema": "aten::acosh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & acosh_(Tensor & self); // {"schema": "aten::acosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & acosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arccosh(const Tensor & self); // {"schema": "aten::arccosh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arccosh_(Tensor & self); // {"schema": "aten::arccosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arccosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor asinh(const Tensor & self); // {"schema": "aten::asinh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & asinh_(Tensor & self); // {"schema": "aten::asinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & asinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arcsinh(const Tensor & self); // {"schema": "aten::arcsinh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arcsinh_(Tensor & self); // {"schema": "aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arcsinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atanh(const Tensor & self); // {"schema": "aten::atanh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atanh_(Tensor & self); // {"schema": "aten::atanh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & atanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arctanh(const Tensor & self); // {"schema": "aten::arctanh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctanh_(Tensor & self); // {"schema": "aten::arctanh_(Tensor(a!) 
self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor as_strided(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)", "dispatch": "True", "default": "False"} +const Tensor & as_strided_(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor asin(const Tensor & self); // {"schema": "aten::asin(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & asin_(Tensor & self); // {"schema": "aten::asin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & asin_out(const Tensor & self, Tensor & out); // {"schema": "aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arcsin(const Tensor & self); // {"schema": "aten::arcsin(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arcsin_(Tensor & self); // {"schema": "aten::arcsin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arcsin_out(const Tensor & self, Tensor & out); // {"schema": "aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atan(const Tensor & self); // {"schema": "aten::atan(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atan_(Tensor & self); // {"schema": "aten::atan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & atan_out(const Tensor & self, Tensor & out); // {"schema": "aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arctan(const Tensor & self); // {"schema": "aten::arctan(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctan_(Tensor & self); // {"schema": "aten::arctan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctan_out(const Tensor & self, Tensor & out); // {"schema": "aten::arctan.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atleast_1d(const Tensor & self); // {"schema": "aten::atleast_1d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_1d(TensorList tensors); // {"schema": "aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor atleast_2d(const Tensor & self); // {"schema": "aten::atleast_2d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_2d(TensorList tensors); // {"schema": "aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor atleast_3d(const Tensor & self); // {"schema": "aten::atleast_3d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_3d(TensorList tensors); // {"schema": "aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & baddbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bartlett_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bartlett_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor batch_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_batch_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & var, double eps, double output_scale, int64_t output_zero_point); // {"schema": "aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _batch_norm_impl_index(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)", "dispatch": "False", "default": "True"} +::std::tuple _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var_transform, bool train, double eps, ::std::array output_mask, const Tensor & reservedSpace); // {"schema": "aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor bernoulli(const Tensor & self, c10::optional generator); // {"schema": "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bernoulli_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bernoulli_(Tensor & self, const Tensor & p, c10::optional generator); // {"schema": "aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bernoulli_(Tensor & self, double p, c10::optional generator); // {"schema": "aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bernoulli(const Tensor & self, double p, c10::optional generator); // {"schema": "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const c10::optional & bias); // {"schema": "aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & binary_cross_entropy_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, Tensor & out); // {"schema": "aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & binary_cross_entropy_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, Tensor & grad_input); // {"schema": "aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bincount(const Tensor & self, const c10::optional & weights, int64_t minlength); // {"schema": "aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor bitwise_not(const Tensor & self); // {"schema": "aten::bitwise_not(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_not_(Tensor & self); // {"schema": "aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_not_out(const Tensor & self, Tensor & out); // {"schema": "aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & copysign_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor copysign(const Tensor & self, const Tensor & other); // {"schema": "aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copysign_(Tensor & self, const Tensor & other); // {"schema": "aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor copysign(const Tensor & self, const Scalar & other); // {"schema": "aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copysign_(Tensor & self, const Scalar & other); // {"schema": "aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & copysign_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _lazy_clone(const Tensor & self); // {"schema": "aten::_lazy_clone(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor logical_not(const Tensor & self); // {"schema": "aten::logical_not(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_not_(Tensor & self); // {"schema": "aten::logical_not_(Tensor(a!) 
self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_not_out(const Tensor & self, Tensor & out); // {"schema": "aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_xor(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_xor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_xor_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_xor_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_and(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_and(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_and_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_and_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_or(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_or(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_or_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_or_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor blackman_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor blackman_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bmm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::bmm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bmm_out(const Tensor & self, const Tensor & mat2, Tensor & out); // {"schema": "aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector broadcast_tensors(TensorList tensors); // {"schema": "aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor broadcast_to(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _sparse_broadcast_to(const Tensor & self, IntArrayRef size); // {"schema": "aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor cat(const ITensorListRef & tensors, int64_t dim); // {"schema": "aten::cat(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cat_out(const ITensorListRef & tensors, int64_t dim, Tensor & out); // {"schema": "aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cat(TensorList tensors, Dimname dim); // {"schema": "aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cat_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concat(TensorList tensors, int64_t dim); // {"schema": "aten::concat(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concat_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concat(TensorList tensors, Dimname dim); // {"schema": "aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concat_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concatenate(TensorList tensors, int64_t dim); // {"schema": "aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concatenate_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concatenate(TensorList tensors, Dimname dim); // {"schema": "aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concatenate_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor block_diag(TensorList tensors); // {"schema": "aten::block_diag(Tensor[] tensors) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ceil(const Tensor & self); // {"schema": "aten::ceil(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ceil_(Tensor & self); // {"schema": "aten::ceil_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ceil_out(const Tensor & self, Tensor & out); // {"schema": "aten::ceil.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor chain_matmul(TensorList matrices); // {"schema": "aten::chain_matmul(Tensor[] matrices) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & chain_matmul_out(TensorList matrices, Tensor & out); // {"schema": "aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> unsafe_chunk(const Tensor & self, int64_t chunks, int64_t dim); // {"schema": "aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> chunk(const Tensor & self, int64_t chunks, int64_t dim); // {"schema": "aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"}
+::std::vector<Tensor> tensor_split(const Tensor & self, c10::SymInt sections, int64_t dim); // {"schema": "aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> tensor_split(const Tensor & self, c10::SymIntArrayRef indices, int64_t dim); // {"schema": "aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> tensor_split(const Tensor & self, const Tensor & tensor_indices_or_sections, int64_t dim); // {"schema": "aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"}
+Tensor clamp(const Tensor & self, const c10::optional<Scalar> & min, const c10::optional<Scalar> & max); // {"schema": "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor clamp(const Tensor & self, const c10::optional<Tensor> & min, const c10::optional<Tensor> & max); // {"schema": "aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & clamp_(Tensor & self, const c10::optional<Scalar> & min, const c10::optional<Scalar> & max); // {"schema": "aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & clamp_(Tensor & self, const c10::optional<Tensor> & min, const c10::optional<Tensor> & max); // {"schema": "aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & clamp_out(const Tensor & self, const c10::optional<Scalar> & min, const c10::optional<Scalar> & max, Tensor & out); // {"schema": "aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & clamp_out(const Tensor & self, const c10::optional<Tensor> & min, const c10::optional<Tensor> & max, Tensor & out); // {"schema": "aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor clamp_max(const Tensor & self, const Scalar & max); // {"schema": "aten::clamp_max(Tensor self, Scalar max) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor clamp_max(const Tensor & self, const Tensor & max); // {"schema": "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & clamp_max_(Tensor & self, const Scalar & max); // {"schema": "aten::clamp_max_(Tensor(a!) 
self, Scalar max) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & clamp_max_(Tensor & self, const Tensor & max); // {"schema": "aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & clamp_max_out(const Tensor & self, const Scalar & max, Tensor & out); // {"schema": "aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & clamp_max_out(const Tensor & self, const Tensor & max, Tensor & out); // {"schema": "aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor clamp_min(const Tensor & self, const Scalar & min); // {"schema": "aten::clamp_min(Tensor self, Scalar min) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor clamp_min(const Tensor & self, const Tensor & min); // {"schema": "aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & clamp_min_(Tensor & self, const Scalar & min); // {"schema": "aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & clamp_min_(Tensor & self, const Tensor & min); // {"schema": "aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & clamp_min_out(const Tensor & self, const Scalar & min, Tensor & out); // {"schema": "aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & clamp_min_out(const Tensor & self, const Tensor & min, Tensor & out); // {"schema": "aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor clip(const Tensor & self, const c10::optional<Scalar> & min, const c10::optional<Scalar> & max); // {"schema": "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor clip(const Tensor & self, const c10::optional<Tensor> & min, const c10::optional<Tensor> & max); // {"schema": "aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & clip_(Tensor & self, const c10::optional<Scalar> & min, const c10::optional<Scalar> & max); // {"schema": "aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & clip_(Tensor & self, const c10::optional<Tensor> & min, const c10::optional<Tensor> & max); // {"schema": "aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & clip_out(const Tensor & self, const c10::optional<Scalar> & min, const c10::optional<Scalar> & max, Tensor & out); // {"schema": "aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & clip_out(const Tensor & self, const c10::optional<Tensor> & min, const c10::optional<Tensor> & max, Tensor & out); // {"schema": "aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +bool cudnn_is_acceptable(const Tensor & self); // {"schema": "aten::cudnn_is_acceptable(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor complex(const Tensor & real, const Tensor & imag); // {"schema": "aten::complex(Tensor real, Tensor imag) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & complex_out(const Tensor & real, const Tensor & imag, Tensor & out); // {"schema": "aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor polar(const Tensor & abs, const Tensor & angle); // {"schema": "aten::polar(Tensor abs, Tensor angle) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polar_out(const Tensor & abs, const Tensor & angle, Tensor & out); // {"schema": "aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor constant_pad_nd(const Tensor & self, c10::SymIntArrayRef pad, const Scalar & value); // {"schema": "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor contiguous(const Tensor & self, MemoryFormat memory_format); // {"schema": "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups); // {"schema": "aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward(const Tensor & grad_output, const Tensor & input, const Tensor & weight, OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor convolution_overrideable(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups); // {"schema": "aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", "dispatch": "True", "default": "True"} +Tensor _convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32); // {"schema": "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled); // {"schema": "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _convolution_mode(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _convolution_double_backward(const c10::optional & ggI, const c10::optional & ggW, const c10::optional & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor conv1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding=\"valid\", SymInt[1] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, str padding=\"valid\", SymInt[2] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding=\"valid\", SymInt[3] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad); // {"schema": "aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad); // {"schema": "aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation); // {"schema": "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_transpose2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation); // {"schema": "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_transpose3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation); // {"schema": "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor copy(const Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _copy_from(const Tensor & self, const Tensor & dst, bool non_blocking); // {"schema": "aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _copy_from_and_resize(const Tensor & self, const Tensor & dst); // {"schema": "aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cos(const Tensor & self); // {"schema": "aten::cos(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cos_(Tensor & self); // {"schema": "aten::cos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cos_out(const Tensor & self, Tensor & out); // {"schema": "aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cosh(const Tensor & self); // {"schema": "aten::cosh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cosh_(Tensor & self); // {"schema": "aten::cosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor count_nonzero(const Tensor & self, IntArrayRef dim); // {"schema": "aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor count_nonzero(const Tensor & self, c10::optional dim); // {"schema": "aten::count_nonzero(Tensor self, int? dim=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor cov(const Tensor & self, int64_t correction, const c10::optional & fweights, const c10::optional & aweights); // {"schema": "aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? 
aweights=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor corrcoef(const Tensor & self); // {"schema": "aten::corrcoef(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W); // {"schema": "aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid", "dispatch": "True", "default": "False"} +Tensor cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W); // {"schema": "aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta", "dispatch": "True", "default": "False"} +::std::tuple cudnn_batch_norm(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon); // {"schema": "aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const Tensor & reserveSpace); // {"schema": "aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32); // {"schema": "aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & cudnn_convolution_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, Tensor & out); // {"schema": "aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32); // {"schema": "aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _mps_convolution_transpose(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mps_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_relu(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_add_relu(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cudnn_grid_sampler(const Tensor & self, const Tensor & grid); // {"schema": "aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output", "dispatch": "True", "default": "False"} +::std::tuple cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output); // {"schema": "aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)", "dispatch": "True", "default": "False"} +::std::tuple cummax(const Tensor & self, int64_t dim); // {"schema": "aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple cummax_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", "dispatch": "True", "default": "True"} +::std::tuple cummax(const Tensor & self, Dimname dim); // {"schema": "aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple cummax_out(const Tensor & self, Dimname dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +void _cummax_helper(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim); // {"schema": "aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()", "dispatch": "True", "default": "False"} +::std::tuple cummin(const Tensor & self, int64_t dim); // {"schema": "aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple cummin_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple cummin(const Tensor & self, Dimname dim); // {"schema": "aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple cummin_out(const Tensor & self, Dimname dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +void _cummin_helper(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim); // {"schema": "aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()", "dispatch": "True", "default": "False"} +Tensor cummaxmin_backward(const Tensor & grad, const Tensor & input, const Tensor & indices, int64_t dim); // {"schema": "aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumprod(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cumprod_(Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cumprod_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cumprod(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cumprod_(Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cumprod_out(const Tensor & self, Dimname dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cumprod_backward(const Tensor & grad, const Tensor & input, int64_t dim, const Tensor & output); // {"schema": "aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumsum(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cumsum_(Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cumsum_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cumsum(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cumsum_(Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cumsum_out(const Tensor & self, Dimname dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cumulative_trapezoid(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumulative_trapezoid(const Tensor & y, const Scalar & dx, int64_t dim); // {"schema": "aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity); // {"schema": "aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity); // {"schema": "aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, 
bool zero_infinity=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor diag_embed(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diagflat(const Tensor & self, int64_t offset); // {"schema": "aten::diagflat(Tensor self, int offset=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor diagonal(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor linalg_diagonal(const Tensor & A, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor diagonal(const Tensor & self, Dimname outdim, Dimname dim1, Dimname dim2, int64_t offset); // {"schema": "aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor diagonal_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fill_diagonal_(Tensor & self, const Scalar & fill_value, bool wrap); // {"schema": "aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor diff(const Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append); // {"schema": "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & diff_out(const Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append, Tensor & out); // {"schema": "aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, const c10::optional & spacing, c10::optional dim, int64_t edge_order); // {"schema": "aten::gradient.scalarint(Tensor self, *, Scalar? 
spacing=None, int? dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> gradient(const Tensor & self, const Scalar & spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> gradient(const Tensor & self, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> gradient(const Tensor & self, ArrayRef<Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order); // {"schema": "aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> gradient(const Tensor & self, ArrayRef<Scalar> spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> gradient(const Tensor & self, TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order); // {"schema": "aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> gradient(const Tensor & self, TensorList spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"}
+Tensor div(const Tensor & self, const Tensor & other); // {"schema": "aten::div.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & div_(Tensor & self, const Tensor & other); // {"schema": "aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & div_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor div(const Tensor & self, const Tensor & other, c10::optional<c10::string_view> rounding_mode); // {"schema": "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & div_(Tensor & self, const Tensor & other, c10::optional<c10::string_view> rounding_mode); // {"schema": "aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & div_out(const Tensor & self, const Tensor & other, c10::optional<c10::string_view> rounding_mode, Tensor & out); // {"schema": "aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor div(const Tensor & self, const Scalar & other); // {"schema": "aten::div.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & div_(Tensor & self, const Scalar & other); // {"schema": "aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor div(const Tensor & self, const Scalar & other, c10::optional<c10::string_view> rounding_mode); // {"schema": "aten::div.Scalar_mode(Tensor self, Scalar other, *, str? 
rounding_mode) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & div_(Tensor & self, const Scalar & other, c10::optional<c10::string_view> rounding_mode); // {"schema": "aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor divide(const Tensor & self, const Tensor & other); // {"schema": "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & divide_(Tensor & self, const Tensor & other); // {"schema": "aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor divide(const Tensor & self, const Scalar & other); // {"schema": "aten::divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & divide_(Tensor & self, const Scalar & other); // {"schema": "aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor divide(const Tensor & self, const Tensor & other, c10::optional<c10::string_view> rounding_mode); // {"schema": "aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & divide_(Tensor & self, const Tensor & other, c10::optional<c10::string_view> rounding_mode); // {"schema": "aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & divide_out(const Tensor & self, const Tensor & other, c10::optional<c10::string_view> rounding_mode, Tensor & out); // {"schema": "aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor divide(const Tensor & self, const Scalar & other, c10::optional<c10::string_view> rounding_mode); // {"schema": "aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & divide_(Tensor & self, const Scalar & other, c10::optional<c10::string_view> rounding_mode); // {"schema": "aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor true_divide(const Tensor & self, const Tensor & other); // {"schema": "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & true_divide_(Tensor & self, const Tensor & other); // {"schema": "aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & true_divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor true_divide(const Tensor & self, const Scalar & other); // {"schema": "aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & true_divide_(Tensor & self, const Scalar & other); // {"schema": "aten::true_divide_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor dot(const Tensor & self, const Tensor & tensor); // {"schema": "aten::dot(Tensor self, Tensor tensor) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & dot_out(const Tensor & self, const Tensor & tensor, Tensor & out); // {"schema": "aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor vdot(const Tensor & self, const Tensor & other); // {"schema": "aten::vdot(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & vdot_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor einsum(c10::string_view equation, TensorList tensors, OptionalIntArrayRef path); // {"schema": "aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor embedding(const Tensor & weight, const Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse); // {"schema": "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor embedding_backward(const Tensor & grad, const Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse); // {"schema": "aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor", "dispatch": "False", "default": "True"} +Tensor embedding_dense_backward(const Tensor & grad_output, const Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq); // {"schema": "aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type); // {"schema": "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); // {"schema": "aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _embedding_bag_forward_only(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx); // {"schema": "aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _rowwise_prune(const Tensor & weight, const Tensor & mask, ScalarType compressed_indices_dtype); // {"schema": "aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor row_stack(TensorList tensors); // {"schema": "aten::row_stack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & row_stack_out(TensorList tensors, Tensor & out); // {"schema": "aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset); // {"schema": "aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, c10::optional padding_idx); // {"schema": "aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx); // {"schema": "aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _embedding_bag_per_sample_weights_backward(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode, int64_t padding_idx); // {"schema": "aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor empty(IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor empty(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor empty_permuted(c10::SymIntArrayRef size, IntArrayRef physical_layout, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_empty(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_empty_strided(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_full(const Tensor & self, c10::SymIntArrayRef size, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_zeros(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_ones(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _empty_affine_quantized(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, double scale, int64_t zero_point, c10::optional memory_format); // {"schema": "aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _empty_per_channel_affine_quantized(c10::SymIntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor", "dispatch": "True", "default": "False"} +const Tensor & resize_(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format); // {"schema": "aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +const Tensor & _resize_output_(const Tensor & self, c10::SymIntArrayRef size, Device device); // {"schema": "aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor empty_quantized(IntArrayRef size, const Tensor & qtensor, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & empty_out(c10::SymIntArrayRef size, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor empty_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor erf(const Tensor & self); // {"schema": "aten::erf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erf_(Tensor & self); // {"schema": "aten::erf_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erf_out(const Tensor & self, Tensor & out); // {"schema": "aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor erfc(const Tensor & self); // {"schema": "aten::erfc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erfc_(Tensor & self); // {"schema": "aten::erfc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erfc_out(const Tensor & self, Tensor & out); // {"schema": "aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor exp(const Tensor & self); // {"schema": "aten::exp(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & exp_(Tensor & self); // {"schema": "aten::exp_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & exp_out(const Tensor & self, Tensor & out); // {"schema": "aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor exp2(const Tensor & self); // {"schema": "aten::exp2(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & exp2_(Tensor & self); // {"schema": "aten::exp2_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & exp2_out(const Tensor & self, Tensor & out); // {"schema": "aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor expm1(const Tensor & self); // {"schema": "aten::expm1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & expm1_(Tensor & self); // {"schema": "aten::expm1_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & expm1_out(const Tensor & self, Tensor & out); // {"schema": "aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor expand(const Tensor & self, c10::SymIntArrayRef size, bool implicit); // {"schema": "aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor expand_as(const Tensor & self, const Tensor & other); // {"schema": "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor eye(c10::SymInt n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor eye(c10::SymInt n, c10::SymInt m, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & eye_out(c10::SymInt n, Tensor & out); // {"schema": "aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & eye_out(c10::SymInt n, c10::SymInt m, Tensor & out); // {"schema": "aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim); // {"schema": "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim, Dimname out_dim); // {"schema": "aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, Dimname start_dim, Dimname end_dim, Dimname out_dim); // {"schema": "aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, DimnameList dims, Dimname out_dim); // {"schema": "aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor unflatten(const Tensor & self, int64_t dim, c10::SymIntArrayRef sizes); // {"schema": "aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor unflatten(const Tensor & self, Dimname dim, c10::SymIntArrayRef sizes, DimnameList names); // {"schema": "aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor fill(const Tensor & self, const Scalar & value); // {"schema": "aten::fill.Scalar(Tensor self, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor fill(const Tensor & self, const Tensor & value); // {"schema": "aten::fill.Tensor(Tensor self, Tensor value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fill_(Tensor & self, const Scalar & value); // {"schema": "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & fill_(Tensor & self, const Tensor & value); // {"schema": "aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor(const Tensor & self); // {"schema": "aten::floor(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & floor_(Tensor & self); // {"schema": "aten::floor_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & floor_out(const Tensor & self, Tensor & out); // {"schema": "aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor_divide(const Tensor & self, const Tensor & other); // {"schema": "aten::floor_divide(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & floor_divide_(Tensor & self, const Tensor & other); // {"schema": "aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & floor_divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor_divide(const Tensor & self, const Scalar & other); // {"schema": "aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & floor_divide_(Tensor & self, const Scalar & other); // {"schema": "aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor frac(const Tensor & self); // {"schema": "aten::frac(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & frac_(Tensor & self); // {"schema": "aten::frac_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & frac_out(const Tensor & self, Tensor & out); // {"schema": "aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor full(IntArrayRef size, const Scalar & fill_value, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor full(c10::SymIntArrayRef size, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & full_out(c10::SymIntArrayRef size, const Scalar & fill_value, Tensor & out); // {"schema": "aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor full_like(const Tensor & self, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor from_file(c10::string_view filename, c10::optional shared, c10::optional size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & gcd_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gcd(const Tensor & self, const Tensor & other); // {"schema": "aten::gcd(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gcd_(Tensor & self, const Tensor & other); // {"schema": "aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lcm_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lcm(const Tensor & self, const Tensor & other); // {"schema": "aten::lcm(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lcm_(Tensor & self, const Tensor & other); // {"schema": "aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "False", "default": "True"} +Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple grid_sampler_2d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); // {"schema": "aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _grid_sampler_2d_cpu_fallback(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple _grid_sampler_2d_cpu_fallback_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor grid_sampler_3d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple grid_sampler_3d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); // {"schema": "aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor hann_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hann_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor group_norm(const Tensor & input, int64_t num_groups, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enabled); // {"schema": "aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple native_group_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps); // {"schema": "aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask); // {"schema": "aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _fft_r2c(const Tensor & self, IntArrayRef dim, int64_t normalization, bool onesided); // {"schema": "aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_r2c_out(const Tensor & self, IntArrayRef dim, int64_t normalization, bool onesided, Tensor & out); // {"schema": "aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _fft_c2r(const Tensor & self, IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size); // {"schema": "aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_c2r_out(const Tensor & self, IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, Tensor & out); // {"schema": "aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _fft_c2c(const Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward); // {"schema": "aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_c2c_out(const Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, Tensor & out); // {"schema": "aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +void _validate_compressed_sparse_indices(bool is_crow, const Tensor & compressed_idx, const Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz); // {"schema": "aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()", "dispatch": "True", "default": "False"} +int64_t _cufft_get_plan_cache_size(DeviceIndex device_index); // {"schema": "aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int", "dispatch": "False", "default": "True"} +int64_t _cufft_get_plan_cache_max_size(DeviceIndex device_index); // {"schema": "aten::_cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int", "dispatch": "False", "default": "True"} +void _cufft_set_plan_cache_max_size(DeviceIndex device_index, int64_t max_size); // {"schema": "aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> ()", "dispatch": "False", "default": "True"} +void _cufft_clear_plan_cache(DeviceIndex device_index); // {"schema": "aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> ()", "dispatch": "False", "default": "True"} +Tensor index(const Tensor & self, const c10::List> & indices); // {"schema": "aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_out(const Tensor & self, const c10::List> & indices, Tensor & out); // {"schema": "aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _unsafe_index(const Tensor & self, const c10::List> & indices); // {"schema": "aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_copy_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, Tensor & out); // {"schema": "aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_copy(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_copy_(Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor index_copy(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & index_put_(Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate); // {"schema": "aten::index_put_(Tensor(a!) 
self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor index_put(const Tensor & self, const c10::List<c10::optional<Tensor>> & indices, const Tensor & values, bool accumulate); // {"schema": "aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _unsafe_index_put(const Tensor & self, const c10::List<c10::optional<Tensor>> & indices, const Tensor & values, bool accumulate); // {"schema": "aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & _index_put_impl_(Tensor & self, const c10::List<c10::optional<Tensor>> & indices, const Tensor & values, bool accumulate, bool unsafe); // {"schema": "aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor instance_norm(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor isclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan); // {"schema": "aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & isin_out(const Tensor & elements, const Tensor & test_elements, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor isin(const Tensor & elements, const Tensor & test_elements, bool assume_unique, bool invert); // {"schema": "aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & isin_out(const Tensor & elements, const Scalar & test_element, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor isin(const Tensor & elements, const Scalar & test_element, bool assume_unique, bool invert); // {"schema": "aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & isin_out(const Scalar & element, const Tensor & test_elements, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
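+//
+// [Editor's note] Illustrative usage sketch for the isin overloads declared above.
+// This comment block is not part of the generated header; it is a hedged example
+// assuming the usual ATen C++ frontend (#include <ATen/ATen.h>), where these
+// operators are exposed as free functions in the at:: namespace.
+//
+//   at::Tensor elements = at::arange(6);            // [0, 1, 2, 3, 4, 5]
+//   at::Tensor test = at::tensor({1, 3, 5});
+//   // aten::isin.Tensor_Tensor: elementwise membership test, returns a bool tensor.
+//   at::Tensor mask = at::isin(elements, test, /*assume_unique=*/false, /*invert=*/false);
+//   // Setting invert=true would instead flag the elements that are NOT in `test`.
+//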
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isin(const Scalar & element, const Tensor & test_elements, bool assume_unique, bool invert); // {"schema": "aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor isnan(const Tensor & self); // {"schema": "aten::isnan(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +bool is_distributed(const Tensor & self); // {"schema": "aten::is_distributed(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_floating_point(const Tensor & self); // {"schema": "aten::is_floating_point(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_complex(const Tensor & self); // {"schema": "aten::is_complex(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_conj(const Tensor & self); // {"schema": "aten::is_conj(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool _is_zerotensor(const Tensor & self); // {"schema": "aten::_is_zerotensor(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_neg(const Tensor & self); // {"schema": "aten::is_neg(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor isreal(const Tensor & self); // {"schema": "aten::isreal(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +bool is_nonzero(const Tensor & self); // {"schema": "aten::is_nonzero(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_same_size(const Tensor & self, const Tensor & other); // {"schema": "aten::is_same_size(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "True"} +bool is_signed(const Tensor & self); // {"schema": "aten::is_signed(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_inference(const Tensor & self); // {"schema": "aten::is_inference(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor kl_div(const Tensor & self, const Tensor & target, int64_t reduction, bool log_target); // {"schema": "aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor kron(const Tensor & self, const Tensor & other); // {"schema": "aten::kron(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & kron_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim); // {"schema": "aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple kthvalue_out(const Tensor & self, int64_t k, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", "dispatch": "True", "default": "False"} +::std::tuple kthvalue(const Tensor & self, int64_t k, Dimname dim, bool keepdim); // {"schema": "aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple kthvalue_out(const Tensor & self, int64_t k, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor layer_norm(const Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enable); // {"schema": "aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple native_layer_norm(const Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); // {"schema": "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_backward(const Tensor & grad_out, const Tensor & input, c10::SymIntArrayRef normalized_shape, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask); // {"schema": "aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor nan_to_num(const Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf); // {"schema": "aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nan_to_num_(Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf); // {"schema": "aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nan_to_num_out(const Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf, Tensor & out); // {"schema": "aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linear(const Tensor & input, const Tensor & weight, const c10::optional & bias); // {"schema": "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple linear_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask); // {"schema": "aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & linear_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, Tensor & out); // {"schema": "aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor mkldnn_linear(const Tensor & self, const Tensor & weight, const c10::optional & bias); // {"schema": "aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_linear_backward_input(IntArrayRef input_size, const Tensor & grad_output, const Tensor & weight); // {"schema": "aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mkldnn_linear_backward_weights(const Tensor & grad_output, const Tensor & input, const Tensor & weight, bool bias_defined); // {"schema": "aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple mkldnn_linear_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask); // {"schema": "aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _cslt_compress(const Tensor & input); // {"schema": "aten::_cslt_compress(Tensor input) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _cslt_sparse_mm(const Tensor & compressed_A, const Tensor & dense_B, const c10::optional & bias, const c10::optional & alpha, c10::optional out_dtype, bool transpose_result, int64_t alg_id); // {"schema": "aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor", "dispatch": "True", "default": "False"} +int64_t _cslt_sparse_mm_search(const Tensor & compressed_A, const Tensor & dense_B, const c10::optional & bias, const c10::optional & alpha, c10::optional out_dtype, bool transpose_result); // {"schema": "aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int", "dispatch": "True", "default": "False"} +Tensor _sparse_semi_structured_linear(const Tensor & input, const Tensor & weight, const Tensor & meta, const c10::optional & bias, c10::optional activation, c10::optional out_dtype); // {"schema": "aten::_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _mixed_dtypes_linear(const Tensor & input, const Tensor & weight, const Tensor & scale, const c10::optional & bias, c10::optional activation); // {"schema": "aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? 
activation=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor fbgemm_linear_int8_weight_fp32_activation(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, const Scalar & weight_scale, const Scalar & weight_zero_point, const Tensor & bias); // {"schema": "aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_int8_weight(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, const Scalar & weight_scale, const Scalar & weight_zero_point, const Tensor & bias); // {"schema": "aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fbgemm_linear_quantize_weight(const Tensor & input); // {"schema": "aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_gemm_matrix_fp16(const Tensor & input); // {"schema": "aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_fp16_weight_fp32_activation(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); // {"schema": "aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_fp16_weight(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); // {"schema": "aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_quantized_matrix(const Tensor & input); // {"schema": "aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_quantized_matrix(const Tensor & input, int64_t K, int64_t N); // {"schema": "aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ldexp(const Tensor & self, const Tensor & other); // {"schema": "aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & ldexp_(Tensor & self, const Tensor & other); // {"schema": "aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & ldexp_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linspace(const Scalar & start, const Scalar & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor linspace(const Tensor & start, const Tensor & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor linspace(const Tensor & start, const Scalar & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor linspace(const Scalar & start, const Tensor & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linspace_out(const Scalar & start, const Scalar & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & linspace_out(const Tensor & start, const Tensor & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & linspace_out(const Tensor & start, const Scalar & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & linspace_out(const Scalar & start, const Tensor & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor log(const Tensor & self); // {"schema": "aten::log(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log_(Tensor & self); // {"schema": "aten::log_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log_out(const Tensor & self, Tensor & out); // {"schema": "aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log10(const Tensor & self); // {"schema": "aten::log10(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log10_(Tensor & self); // {"schema": "aten::log10_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log10_out(const Tensor & self, Tensor & out); // {"schema": "aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log1p(const Tensor & self); // {"schema": "aten::log1p(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log1p_(Tensor & self); // {"schema": "aten::log1p_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log1p_out(const Tensor & self, Tensor & out); // {"schema": "aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log2(const Tensor & self); // {"schema": "aten::log2(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log2_(Tensor & self); // {"schema": "aten::log2_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log2_out(const Tensor & self, Tensor & out); // {"schema": "aten::log2.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & logaddexp_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logaddexp(const Tensor & self, const Tensor & other); // {"schema": "aten::logaddexp(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logaddexp2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logaddexp2(const Tensor & self, const Tensor & other); // {"schema": "aten::logaddexp2(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Tensor & self, const Tensor & other); // {"schema": "aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Scalar & self, const Tensor & other); // {"schema": "aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Tensor & self, const Scalar & other); // {"schema": "aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & xlogy_(Tensor & self, const Tensor & other); // {"schema": "aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_(Tensor & self, const Scalar & other); // {"schema": "aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & xlogy_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logspace(const Scalar & start, const Scalar & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor logspace(const Tensor & start, const Tensor & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor logspace(const Tensor & start, const Scalar & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor logspace(const Scalar & start, const Tensor & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logspace_out(const Scalar & start, const Scalar & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & logspace_out(const Tensor & start, const Tensor & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logspace_out(const Tensor & start, const Scalar & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logspace_out(const Scalar & start, const Tensor & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & log_softmax_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor log_softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _log_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _log_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype); // {"schema": "aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _log_softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype, Tensor & out); // {"schema": "aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _logcumsumexp(const Tensor & self, int64_t dim); // {"schema": "aten::_logcumsumexp(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _logcumsumexp_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logcumsumexp(const Tensor & self, int64_t dim); // {"schema": "aten::logcumsumexp(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logcumsumexp_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logcumsumexp(const Tensor & self, Dimname dim); // {"schema": "aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & logcumsumexp_out(const Tensor & self, Dimname dim, Tensor & out); // {"schema": "aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logsumexp_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logsumexp(const Tensor & self, DimnameList dim, bool keepdim); // {"schema": "aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & logsumexp_out(const Tensor & self, DimnameList dim, bool keepdim, Tensor & out); // {"schema": "aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor margin_ranking_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple matmul_backward(const Tensor & grad, const Tensor & self, const Tensor & other, ::std::array mask); // {"schema": "aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor matrix_power(const Tensor & self, int64_t n); // {"schema": "aten::matrix_power(Tensor self, int n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & matrix_power_out(const Tensor & self, int64_t n, Tensor & out); // {"schema": "aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor matrix_exp(const Tensor & self); // {"schema": "aten::matrix_exp(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor matrix_exp_backward(const Tensor & self, const Tensor & grad); // {"schema": "aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _aminmax(const Tensor & self); // {"schema": "aten::_aminmax(Tensor self) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _aminmax(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple aminmax(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)", "dispatch": "True", "default": "True"} +::std::tuple aminmax_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & min, Tensor & max); // {"schema": "aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)", "dispatch": "True", "default": "False"} +Tensor _compute_linear_combination(const Tensor & input, const Tensor & coefficients); // {"schema": "aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _compute_linear_combination_out(const Tensor & input, const Tensor & coefficients, Tensor & out); // {"schema": "aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple max(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple max_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & max, Tensor & max_values); // {"schema": "aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple max(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple max_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & max, Tensor & max_values); // {"schema": "aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor value_selecting_reduction_backward(const Tensor & grad, int64_t dim, const Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim); // {"schema": "aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor amax(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & amax_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple max_pool1d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool2d_backward(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool3d_backward(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantized_max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantized_max_pool2d(const Tensor & 
self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantized_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor mean(const Tensor & self, c10::optional<ScalarType> dtype); // {"schema": "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor mean(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mean_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mean(const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & mean_out(const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanmean(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanmean_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor median(const Tensor & self); // {"schema": "aten::median(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> median(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor nanmedian(const Tensor & self); // {"schema": "aten::nanmedian(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> nanmedian(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple<Tensor &,Tensor &> nanmedian_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> nanmedian(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor &,Tensor &> nanmedian_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & min, Tensor & min_indices); // {"schema": "aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> min(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & min, Tensor & min_indices); // {"schema": "aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor amin(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & amin_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _mps_convolution(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mps_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask); // {"schema": "aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor mkldnn_convolution(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_rnn_layer(const Tensor & input, const Tensor & weight0, const Tensor & weight1, const Tensor & weight2, const Tensor & weight3, const Tensor & hx_, const Tensor & cx_, bool reverse, IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train); // {"schema": "aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple mkldnn_rnn_layer_backward(const Tensor & input, const Tensor & weight1, const Tensor & weight2, const Tensor & weight3, const Tensor & weight4, const Tensor & hx_, const Tensor & cx_tmp, const Tensor & output, const Tensor & hy_, const Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, IntArrayRef batch_sizes, bool batch_first, const Tensor & workspace); // {"schema": "aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple miopen_batch_norm(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon); // {"schema": "aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple miopen_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon); // {"schema": "aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor miopen_convolution(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_transpose(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_depthwise_convolution(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_relu(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_add_relu(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple miopen_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state); // {"schema": "aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple> miopen_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask); // {"schema": "aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])", "dispatch": "True", "default": "False"} +Tensor mm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::mm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mm_out(const Tensor & self, const Tensor & mat2, Tensor & out); // {"schema": "aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _int_mm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::_int_mm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _int_mm_out(const Tensor & self, const Tensor & mat2, Tensor & out); // {"schema": "aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _convert_weight_to_int4pack(const Tensor & self, int64_t innerKTiles); // {"schema": "aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _weight_int4pack_mm(const Tensor & self, const Tensor & mat2, int64_t qGroupSize, const Tensor & qScaleAndZeros); // {"schema": "aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _weight_int8pack_mm(const Tensor & self, const Tensor & mat2, const Tensor & scales); // {"schema": "aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense); // {"schema": "aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense, c10::string_view reduce); // {"schema": "aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sparse_matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mode(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "False"} +::std::tuple mode_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple mode(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple mode_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor mul(const Tensor & self, const Tensor & other); // {"schema": "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mul_(Tensor & self, const Tensor & other); // {"schema": "aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mul(const Tensor & self, const Scalar & other); // {"schema": "aten::mul.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mul_(Tensor & self, const Scalar & other); // {"schema": "aten::mul_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor multiply(const Tensor & self, const Tensor & other); // {"schema": "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multiply_(Tensor & self, const Tensor & other); // {"schema": "aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & multiply_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor multiply(const Tensor & self, const Scalar & other); // {"schema": "aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multiply_(Tensor & self, const Scalar & other); // {"schema": "aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor mv(const Tensor & self, const Tensor & vec); // {"schema": "aten::mv(Tensor self, Tensor vec) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mv_out(const Tensor & self, const Tensor & vec, Tensor & out); // {"schema": "aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mvlgamma_out(const Tensor & self, int64_t p, Tensor & out); // {"schema": "aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mvlgamma(const Tensor & self, int64_t p); // {"schema": "aten::mvlgamma(Tensor self, int p) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mvlgamma_(Tensor & self, int64_t p); // {"schema": "aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor narrow_copy(const Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); // {"schema": "aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & narrow_copy_out(const Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, Tensor & out); // {"schema": "aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor narrow(const Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); // {"schema": "aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor narrow(const Tensor & self, int64_t dim, const Tensor & start, c10::SymInt length); // {"schema": "aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)", "dispatch": "False", "default": "True"} +::std::tuple native_batch_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps); // {"schema": "aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple native_batch_norm_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, Tensor & out, Tensor & save_mean, Tensor & save_invstd); // {"schema": "aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "False"} +::std::tuple _native_batch_norm_legit(const Tensor & input, const c10::optional & weight, const c10::optional & bias, Tensor & running_mean, Tensor & running_var, bool training, double momentum, double eps); // {"schema": "aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _native_batch_norm_legit_no_training(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps); // {"schema": "aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +::std::tuple _native_batch_norm_legit_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, Tensor & running_mean, Tensor & running_var, bool training, double momentum, double eps, Tensor & out, Tensor & save_mean, Tensor & save_invstd); // {"schema": "aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))", "dispatch": "True", "default": "False"} +::std::tuple _native_batch_norm_legit(const Tensor & input, const c10::optional & weight, const c10::optional & bias, bool training, double momentum, double eps); // {"schema": "aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _native_batch_norm_legit_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, bool training, double momentum, double eps, Tensor & out, Tensor & save_mean, Tensor & save_invstd); // {"schema": "aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_stats(const Tensor & input, double eps); // {"schema": "aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor batch_norm_elemt(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & invstd, double eps); // {"schema": "aten::batch_norm_elemt(Tensor input, Tensor? 
weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & batch_norm_elemt_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & invstd, double eps, Tensor & out); // {"schema": "aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_gather_stats(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count); // {"schema": "aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_gather_stats_with_counts(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const Tensor & counts); // {"schema": "aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple native_batch_norm_backward(const Tensor & grad_out, const Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask); // {"schema": "aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_backward_reduce(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g); // {"schema": "aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor batch_norm_backward_elemt(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, const Tensor & sum_dy, const Tensor & sum_dy_xmu, const Tensor & count); // {"schema": "aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple batch_norm_update_stats(const Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum); // {"schema": "aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? 
running_var, float momentum) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +bool is_vulkan_available(); // {"schema": "aten::is_vulkan_available() -> bool", "dispatch": "False", "default": "True"} +bool _nnpack_available(); // {"schema": "aten::_nnpack_available() -> bool", "dispatch": "False", "default": "True"} +Tensor _nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride); // {"schema": "aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ones(IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ones(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ones_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor ones_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor pairwise_distance(const Tensor & x1, const Tensor & x2, double p, double eps, bool keepdim); // {"schema": "aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cdist(const Tensor & x1, const Tensor & x2, double p, c10::optional compute_mode); // {"schema": "aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _euclidean_dist(const Tensor & x1, const Tensor & x2); // {"schema": "aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _cdist_forward(const Tensor & x1, const Tensor & x2, double p, c10::optional compute_mode); // {"schema": "aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? 
compute_mode) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _cdist_backward(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist); // {"schema": "aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pdist(const Tensor & self, double p); // {"schema": "aten::pdist(Tensor self, float p=2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _pdist_forward(const Tensor & self, double p); // {"schema": "aten::_pdist_forward(Tensor self, float p=2) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _pdist_backward(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist); // {"schema": "aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cosine_similarity(const Tensor & x1, const Tensor & x2, int64_t dim, double eps); // {"schema": "aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor", "dispatch": "False", "default": "True"} +Tensor permute(const Tensor & self, IntArrayRef dims); // {"schema": "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor movedim(const Tensor & self, IntArrayRef source, IntArrayRef destination); // {"schema": "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor movedim(const Tensor & self, int64_t source, int64_t destination); // {"schema": "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor moveaxis(const Tensor & self, IntArrayRef source, IntArrayRef destination); // {"schema": "aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor moveaxis(const Tensor & self, int64_t source, int64_t destination); // {"schema": "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor numpy_T(const Tensor & self); // {"schema": "aten::numpy_T(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor matrix_H(const Tensor & self); // {"schema": "aten::matrix_H(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor mT(const Tensor & self); // {"schema": "aten::mT(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor mH(const Tensor & self); // {"schema": "aten::mH(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor adjoint(const Tensor & self); // {"schema": "aten::adjoint(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor pixel_shuffle(const Tensor & self, int64_t upscale_factor); // {"schema": "aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor", "dispatch": "True", "default": "True"} +Tensor pixel_unshuffle(const Tensor & self, int64_t downscale_factor); // {"schema": "aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor", "dispatch": "True", "default": "True"} +Tensor channel_shuffle(const Tensor & self, c10::SymInt groups); // {"schema": "aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor native_channel_shuffle(const Tensor & self, c10::SymInt groups); // {"schema": "aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor", "dispatch": 
"True", "default": "True"} +bool is_pinned(const Tensor & self, c10::optional device); // {"schema": "aten::is_pinned(Tensor self, Device? device=None) -> bool", "dispatch": "True", "default": "True"} +Tensor pin_memory(const Tensor & self, c10::optional device); // {"schema": "aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _pin_memory(const Tensor & self, c10::optional device); // {"schema": "aten::_pin_memory(Tensor self, Device? device=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pinverse(const Tensor & self, double rcond); // {"schema": "aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor", "dispatch": "False", "default": "True"} +Tensor poisson_nll_loss(const Tensor & input, const Tensor & target, bool log_input, bool full, double eps, int64_t reduction); // {"schema": "aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rad2deg(const Tensor & self); // {"schema": "aten::rad2deg(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rad2deg_(Tensor & self); // {"schema": "aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rad2deg_out(const Tensor & self, Tensor & out); // {"schema": "aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor deg2rad(const Tensor & self); // {"schema": "aten::deg2rad(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & deg2rad_(Tensor & self); // {"schema": "aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & deg2rad_out(const Tensor & self, Tensor & out); // {"schema": "aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor scalar_tensor(const Scalar & s, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rand_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor rand_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randint_out(c10::SymInt high, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor randint_like(const Tensor & self, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint_like(const Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randn_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & randn_out(c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor randn_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randperm(c10::SymInt n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randperm(c10::SymInt n, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randperm_out(c10::SymInt n, Tensor & out); // {"schema": "aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randperm_out(c10::SymInt n, c10::optional generator, Tensor & out); // {"schema": "aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor range(const Scalar & start, const Scalar & end, const Scalar & step, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor range(const Scalar & start, const Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & range_out(const Scalar & start, const Scalar & end, Tensor & out); // {"schema": "aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & range_out(const Scalar & start, const Scalar & end, const Scalar & step, Tensor & out); // {"schema": "aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ravel(const Tensor & self); // {"schema": "aten::ravel(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor reciprocal(const Tensor & self); // {"schema": "aten::reciprocal(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reciprocal_(Tensor & self); // {"schema": "aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & reciprocal_out(const Tensor & self, Tensor & out); // {"schema": "aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor neg(const Tensor & self); // {"schema": "aten::neg(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & neg_(Tensor & self); // {"schema": "aten::neg_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & neg_out(const Tensor & self, Tensor & out); // {"schema": "aten::neg.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor negative(const Tensor & self); // {"schema": "aten::negative(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & negative_(Tensor & self); // {"schema": "aten::negative_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & negative_out(const Tensor & self, Tensor & out); // {"schema": "aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor repeat(const Tensor & self, c10::SymIntArrayRef repeats); // {"schema": "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor", "dispatch": "True", "default": "True"} +Tensor repeat_interleave(const Tensor & repeats, c10::optional output_size); // {"schema": "aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor repeat_interleave(const Tensor & self, const Tensor & repeats, c10::optional dim, c10::optional output_size); // {"schema": "aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor repeat_interleave(const Tensor & self, c10::SymInt repeats, c10::optional dim, c10::optional output_size); // {"schema": "aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor reshape(const Tensor & self, c10::SymIntArrayRef shape); // {"schema": "aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _reshape_copy(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _reshape_alias(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _mkldnn_reshape(const Tensor & self, IntArrayRef shape); // {"schema": "aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor", "dispatch": "True", "default": "False"} +Tensor reshape_as(const Tensor & self, const Tensor & other); // {"schema": "aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor round(const Tensor & self); // {"schema": "aten::round(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & round_(Tensor & self); // {"schema": "aten::round_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & round_out(const Tensor & self, Tensor & out); // {"schema": "aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor round(const Tensor & self, int64_t decimals); // {"schema": "aten::round.decimals(Tensor self, *, int decimals) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & round_(Tensor & self, int64_t decimals); // {"schema": "aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & round_out(const Tensor & self, int64_t decimals, Tensor & out); // {"schema": "aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor rrelu(const Tensor & self, const Scalar & lower, const Scalar & upper, bool training, c10::optional generator); // {"schema": "aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & rrelu_(Tensor & self, const Scalar & lower, const Scalar & upper, bool training, c10::optional generator); // {"schema": "aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor relu(const Tensor & self); // {"schema": "aten::relu(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & relu_(Tensor & self); // {"schema": "aten::relu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor relu6(const Tensor & self); // {"schema": "aten::relu6(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & relu6_(Tensor & self); // {"schema": "aten::relu6_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor prelu(const Tensor & self, const Tensor & weight); // {"schema": "aten::prelu(Tensor self, Tensor weight) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _prelu_kernel(const Tensor & self, const Tensor & weight); // {"schema": "aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _prelu_kernel_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight); // {"schema": "aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & gelu_out(const Tensor & self, c10::string_view approximate, Tensor & out); // {"schema": "aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & gelu_(Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor gelu(const Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu(Tensor self, *, str approximate='none') -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gelu_backward_out(const Tensor & grad_output, const Tensor & self, c10::string_view approximate, Tensor & grad_input); // {"schema": "aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gelu_backward(const Tensor & grad_output, const Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor", "dispatch": "True", "default": "True"} +Tensor infinitely_differentiable_gelu_backward(const Tensor & grad, const Tensor & self); // {"schema": "aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & hardshrink_out(const Tensor & self, const Scalar & lambd, Tensor & out); // {"schema": "aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardshrink(const Tensor & self, const Scalar & lambd); // {"schema": "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardshrink_backward_out(const Tensor & grad_out, const Tensor & self, const Scalar & lambd, Tensor & grad_input); // {"schema": "aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardshrink_backward(const Tensor & grad_out, const Tensor & self, const Scalar & lambd); // {"schema": "aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rsqrt(const Tensor & self); // {"schema": "aten::rsqrt(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rsqrt_(Tensor & self); // {"schema": "aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rsqrt_out(const Tensor & self, Tensor & out); // {"schema": "aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor select(const Tensor & self, Dimname dim, int64_t index); // {"schema": "aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor select(const Tensor & self, int64_t dim, c10::SymInt index); // {"schema": "aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor select_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index); // {"schema": "aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _nested_select_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, c10::SymInt index); // {"schema": "aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor selu(const Tensor & self); // {"schema": "aten::selu(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & selu_(Tensor & self); // {"schema": "aten::selu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor celu(const Tensor & self, const Scalar & alpha); // {"schema": "aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & celu_(Tensor & self, const Scalar & alpha); // {"schema": "aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor silu(const Tensor & self); // {"schema": "aten::silu(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & silu_(Tensor & self); // {"schema": "aten::silu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & silu_out(const Tensor & self, Tensor & out); // {"schema": "aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & silu_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor silu_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor mish(const Tensor & self); // {"schema": "aten::mish(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & mish_(Tensor & self); // {"schema": "aten::mish_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & mish_out(const Tensor & self, Tensor & out); // {"schema": "aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor mish_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor sigmoid(const Tensor & self); // {"schema": "aten::sigmoid(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sigmoid_(Tensor & self); // {"schema": "aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor logit(const Tensor & self, c10::optional<double> eps); // {"schema": "aten::logit(Tensor self, float? eps=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & logit_(Tensor & self, c10::optional<double> eps); // {"schema": "aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & logit_out(const Tensor & self, c10::optional<double> eps, Tensor & out); // {"schema": "aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sin(const Tensor & self); // {"schema": "aten::sin(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sin_(Tensor & self); // {"schema": "aten::sin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sin_out(const Tensor & self, Tensor & out); // {"schema": "aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sinc(const Tensor & self); // {"schema": "aten::sinc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sinc_(Tensor & self); // {"schema": "aten::sinc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sinc_out(const Tensor & self, Tensor & out); // {"schema": "aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sinh(const Tensor & self); // {"schema": "aten::sinh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sinh_(Tensor & self); // {"schema": "aten::sinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor detach(const Tensor & self); // {"schema": "aten::detach(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor & detach_(Tensor & self); // {"schema": "aten::detach_(Tensor(a!)
self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +int64_t size(const Tensor & self, int64_t dim); // {"schema": "aten::size.int(Tensor self, int dim) -> int", "dispatch": "False", "default": "True"} +int64_t size(const Tensor & self, Dimname dim); // {"schema": "aten::size.Dimname(Tensor self, Dimname dim) -> int", "dispatch": "False", "default": "True"} +c10::SymInt sym_size(const Tensor & self, int64_t dim); // {"schema": "aten::sym_size.int(Tensor self, int dim) -> SymInt", "dispatch": "False", "default": "True"} +c10::SymInt sym_numel(const Tensor & self); // {"schema": "aten::sym_numel(Tensor self) -> SymInt", "dispatch": "False", "default": "True"} +c10::SymInt sym_storage_offset(const Tensor & self); // {"schema": "aten::sym_storage_offset(Tensor self) -> SymInt", "dispatch": "False", "default": "True"} +Tensor slice(const Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); // {"schema": "aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor slice_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step); // {"schema": "aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor", "dispatch": "True", "default": "True"} +Tensor slice_inverse(const Tensor & self, const Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); // {"schema": "aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor slice_scatter(const Tensor & self, const Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); // {"schema": "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor select_scatter(const Tensor & self, const Tensor & src, int64_t dim, c10::SymInt index); // {"schema": "aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diagonal_scatter(const Tensor & self, const Tensor & src, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor as_strided_scatter(const Tensor & self, const Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor smm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::smm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & softmax_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype); // {"schema": "aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype, Tensor & grad_input); // {"schema": "aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector unsafe_split(const Tensor & self, c10::SymInt split_size, int64_t dim); // {"schema": "aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector split(const Tensor & self, c10::SymInt split_size, int64_t dim); // {"schema": "aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"} +::std::vector split(const Tensor & self, c10::SymIntArrayRef split_size, int64_t dim); // {"schema": "aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector unsafe_split_with_sizes(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim); // {"schema": "aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector split_with_sizes(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim); // {"schema": "aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"} +::std::vector hsplit(const Tensor & self, int64_t sections); // {"schema": "aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector hsplit(const Tensor & self, IntArrayRef indices); // {"schema": "aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector vsplit(const Tensor & self, int64_t sections); // {"schema": "aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector vsplit(const Tensor & self, IntArrayRef indices); // {"schema": "aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector dsplit(const Tensor & self, int64_t sections); // {"schema": "aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector dsplit(const Tensor & self, IntArrayRef indices); // {"schema": 
"aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +Tensor squeeze(const Tensor & self); // {"schema": "aten::squeeze(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor squeeze(const Tensor & self, int64_t dim); // {"schema": "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor squeeze(const Tensor & self, Dimname dim); // {"schema": "aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor squeeze(const Tensor & self, IntArrayRef dim); // {"schema": "aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self); // {"schema": "aten::squeeze_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self, int64_t dim); // {"schema": "aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self, IntArrayRef dim); // {"schema": "aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self, Dimname dim); // {"schema": "aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor sspaddmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & sspaddmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _chunk_cat(TensorList tensors, int64_t dim, int64_t num_chunks); // {"schema": "aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _chunk_cat_out(TensorList tensors, int64_t dim, int64_t num_chunks, Tensor & out); // {"schema": "aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor stack(TensorList tensors, int64_t dim); // {"schema": "aten::stack(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & stack_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _stack(TensorList tensors, int64_t dim); // {"schema": "aten::_stack(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _stack_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor hstack(TensorList tensors); // {"schema": "aten::hstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & hstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::hstack.out(Tensor[] tensors, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor vstack(TensorList tensors); // {"schema": "aten::vstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & vstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor dstack(TensorList tensors); // {"schema": "aten::dstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & dstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor stft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool normalized, c10::optional onesided, c10::optional return_complex); // {"schema": "aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor stft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional onesided, c10::optional return_complex); // {"schema": "aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor istft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool center, bool normalized, c10::optional onesided, c10::optional length, bool return_complex); // {"schema": "aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor", "dispatch": "False", "default": "True"} +int64_t stride(const Tensor & self, int64_t dim); // {"schema": "aten::stride.int(Tensor self, int dim) -> int", "dispatch": "False", "default": "True"} +int64_t stride(const Tensor & self, Dimname dim); // {"schema": "aten::stride.Dimname(Tensor self, Dimname dim) -> int", "dispatch": "False", "default": "True"} +c10::SymInt sym_stride(const Tensor & self, int64_t dim); // {"schema": "aten::sym_stride.int(Tensor self, int dim) -> SymInt", "dispatch": "False", "default": "True"} +Tensor sum(const Tensor & self, c10::optional dtype); // {"schema": "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sum(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sum(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype); // {"schema": "aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & sum_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & sum_out(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor _nested_sum_backward(const Tensor & grad, const Tensor & self, OptionalIntArrayRef dim, bool keepdim); // {"schema": "aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor nansum(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & nansum_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sum_to_size(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sqrt(const Tensor & self); // {"schema": "aten::sqrt(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sqrt_(Tensor & self); // {"schema": "aten::sqrt_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sqrt_out(const Tensor & self, Tensor & out); // {"schema": "aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor square(const Tensor & self); // {"schema": "aten::square(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & square_(Tensor & self); // {"schema": "aten::square_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & square_out(const Tensor & self, Tensor & out); // {"schema": "aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor std(const Tensor & self, bool unbiased); // {"schema": "aten::std(Tensor self, bool unbiased=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor std(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor std(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple std_mean(const Tensor & self, bool unbiased); // {"schema": "aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple std_mean(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple std_mean(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::std_mean.correction(Tensor self, int[1]? 
dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple std_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple std_mean(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor & std_out(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & std_out(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, Tensor & out); // {"schema": "aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor std(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & std_out(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor std(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & std_out(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim, Tensor & out); // {"schema": "aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor prod(const Tensor & self, c10::optional dtype); // {"schema": "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor prod(const Tensor & self, int64_t dim, bool keepdim, c10::optional dtype); // {"schema": "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & prod_out(const Tensor & self, int64_t dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor prod(const Tensor & self, Dimname dim, bool keepdim, c10::optional dtype); // {"schema": "aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & prod_out(const Tensor & self, Dimname dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor t(const Tensor & self); // {"schema": "aten::t(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & t_(Tensor & self); // {"schema": "aten::t_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor tan(const Tensor & self); // {"schema": "aten::tan(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tan_(Tensor & self); // {"schema": "aten::tan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & tan_out(const Tensor & self, Tensor & out); // {"schema": "aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tanh(const Tensor & self); // {"schema": "aten::tanh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tanh_(Tensor & self); // {"schema": "aten::tanh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & tanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tensordot(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other); // {"schema": "aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & tensordot_out(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other, Tensor & out); // {"schema": "aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor threshold(const Tensor & self, const Scalar & threshold, const Scalar & value); // {"schema": "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & threshold_(Tensor & self, const Scalar & threshold, const Scalar & value); // {"schema": "aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & threshold_out(const Tensor & self, const Scalar & threshold, const Scalar & value, Tensor & out); // {"schema": "aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & threshold_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & threshold, Tensor & grad_input); // {"schema": "aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor threshold_backward(const Tensor & grad_output, const Tensor & self, const Scalar & threshold); // {"schema": "aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor", "dispatch": "True", "default": "True"} +Tensor tile(const Tensor & self, c10::SymIntArrayRef dims); // {"schema": "aten::tile(Tensor self, SymInt[] dims) -> Tensor", "dispatch": "False", "default": "True"} +Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor transpose(const Tensor & self, Dimname dim0, Dimname dim1); // {"schema": "aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _mkldnn_transpose(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & transpose_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mkldnn_transpose_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor one_hot(const Tensor & self, int64_t num_classes); // {"schema": "aten::one_hot(Tensor self, int num_classes=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor flip(const Tensor & self, IntArrayRef dims); // {"schema": "aten::flip(Tensor self, int[] dims) -> Tensor", "dispatch": "True", "default": "False"} +Tensor fliplr(const Tensor & self); // {"schema": "aten::fliplr(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor flipud(const Tensor & self); // {"schema": "aten::flipud(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor roll(const Tensor & self, c10::SymIntArrayRef shifts, IntArrayRef dims); // {"schema": "aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor", "dispatch": "True", "default": "False"} +Tensor rot90(const Tensor & self, int64_t k, IntArrayRef dims); // {"schema": "aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor", "dispatch": "True", "default": "True"} +Tensor trapezoid(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor trapezoid(const Tensor & y, const Scalar & dx, int64_t dim); // {"schema": "aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor trapz(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor trapz(const Tensor & y, double dx, int64_t dim); // {"schema": "aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _transform_bias_rescale_qkv(const Tensor & qkv, const Tensor & qkv_bias, int64_t num_heads); // {"schema": "aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_from_mask(const Tensor & t, const Tensor & mask, bool mask_check); // 
{"schema": "aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor", "dispatch": "True", "default": "False"} +bool _nested_tensor_from_mask_left_aligned(const Tensor & t, const Tensor & mask); // {"schema": "aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool", "dispatch": "True", "default": "False"} +Tensor _nested_from_padded(const Tensor & padded, const Tensor & cpu_nested_shape_example, bool fuse_transform_0213); // {"schema": "aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_size(const Tensor & self); // {"schema": "aten::_nested_tensor_size(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_strides(const Tensor & self); // {"schema": "aten::_nested_tensor_strides(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_storage_offsets(const Tensor & self); // {"schema": "aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_from_padded_and_nested_example(const Tensor & padded, const Tensor & nt_example); // {"schema": "aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_view_from_buffer(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, const Tensor & offsets); // {"schema": "aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _nested_view_from_buffer_copy(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, const Tensor & offsets); // {"schema": "aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _nested_view_from_jagged(const Tensor & self, const Tensor & offsets, const Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx); // {"schema": "aten::_nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _nested_view_from_jagged_copy(const Tensor & self, const Tensor & offsets, const Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx); // {"schema": "aten::_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? 
lengths=None, int ragged_idx=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _nested_get_values(const Tensor & self); // {"schema": "aten::_nested_get_values(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor _nested_get_values_copy(const Tensor & self); // {"schema": "aten::_nested_get_values_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _nested_get_offsets(const Tensor & self); // {"schema": "aten::_nested_get_offsets(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _nested_get_lengths(const Tensor & self); // {"schema": "aten::_nested_get_lengths(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+int64_t _nested_get_ragged_idx(const Tensor & self); // {"schema": "aten::_nested_get_ragged_idx(Tensor self) -> int", "dispatch": "True", "default": "False"}
+Tensor _nested_get_jagged_dummy(const Tensor & any); // {"schema": "aten::_nested_get_jagged_dummy(Tensor any) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _trilinear(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim); // {"schema": "aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor triplet_margin_loss(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction); // {"schema": "aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor trunc(const Tensor & self); // {"schema": "aten::trunc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & trunc_(Tensor & self); // {"schema": "aten::trunc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & trunc_out(const Tensor & self, Tensor & out); // {"schema": "aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor fix(const Tensor & self); // {"schema": "aten::fix(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & fix_(Tensor & self); // {"schema": "aten::fix_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & fix_out(const Tensor & self, Tensor & out); // {"schema": "aten::fix.out(Tensor self, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor type_as(const Tensor & self, const Tensor & other); // {"schema": "aten::type_as(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +bool _has_compatible_shallow_copy_type(const Tensor & self, const Tensor & from); // {"schema": "aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool", "dispatch": "False", "default": "True"} +::std::tuple _unique(const Tensor & self, bool sorted, bool return_inverse); // {"schema": "aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple unique_dim(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts); // {"schema": "aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple unique_consecutive(const Tensor & self, bool return_inverse, bool return_counts, c10::optional dim); // {"schema": "aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple unique_dim_consecutive(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts); // {"schema": "aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _unique2(const Tensor & self, bool sorted, bool return_inverse, bool return_counts); // {"schema": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _unsafe_view(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor unsqueeze(const Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & unsqueeze_(Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor vander(const Tensor & x, c10::optional N, bool increasing); // {"schema": "aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor var(const Tensor & self, bool unbiased); // {"schema": "aten::var(Tensor self, bool unbiased=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor var(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor var(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & var_out(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & var_out(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, Tensor & out); // {"schema": "aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor var(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & var_out(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor var(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & var_out(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim, Tensor & out); // {"schema": "aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple var_mean(const Tensor & self, bool unbiased); // {"schema": "aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple var_mean(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple var_mean(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple var_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple var_mean(const Tensor & self, DimnameList dim, const c10::optional & correction, bool keepdim); // {"schema": "aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor view_as(const Tensor & self, const Tensor & other); // {"schema": "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor where(const Tensor & condition, const Tensor & self, const Tensor & other); // {"schema": "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & where_out(const Tensor & condition, const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor where(const Tensor & condition, const Scalar & self, const Tensor & other); // {"schema": "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor where(const Tensor & condition, const Tensor & self, const Scalar & other); // {"schema": "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor where(const Tensor & condition, const Scalar & self, const Scalar & other); // {"schema": "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector where(const Tensor & condition); // {"schema": "aten::where(Tensor condition) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor norm_except_dim(const Tensor & v, int64_t pow, int64_t dim); // {"schema": "aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _weight_norm(const Tensor & v, const Tensor & g, int64_t dim); // {"schema": "aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _weight_norm_interface(const Tensor & v, const Tensor & g, int64_t dim); // {"schema": "aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _weight_norm_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); // {"schema": "aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _weight_norm_differentiable_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); // {"schema": "aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor zeros(IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _efficientzerotensor(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor zeros(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & zeros_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::zeros.out(SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor zeros_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output); // {"schema": "aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _standard_gamma(const Tensor & self, c10::optional generator); // {"schema": "aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total); // {"schema": "aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sample_dirichlet(const Tensor & self, c10::optional generator); // {"schema": "aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor poisson(const Tensor & self, c10::optional generator); // {"schema": "aten::poisson(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor binomial(const Tensor & count, const Tensor & prob, c10::optional generator); // {"schema": "aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor native_norm(const Tensor & self, const Scalar & p); // {"schema": "aten::native_norm(Tensor self, Scalar p=2) -> Tensor", "dispatch": "True", "default": "False"} +Tensor native_norm(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_sum(const Tensor & self); // {"schema": "aten::_sparse_sum(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sum(const Tensor & self, ScalarType dtype); // {"schema": "aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sum(const Tensor & self, IntArrayRef dim); // {"schema": "aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _sparse_sum(const Tensor & self, IntArrayRef dim, ScalarType dtype); // {"schema": "aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sum_backward(const Tensor & grad, const Tensor & self, IntArrayRef dim); // {"schema": "aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_csr_sum(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_csr_prod(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); // {"schema": "aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_log_softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_log_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); // {"schema": "aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _spdiags(const Tensor & diagonals, const Tensor & offsets, IntArrayRef shape, c10::optional layout); // {"schema": "aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor norm(const Tensor & self, const c10::optional & p, ScalarType dtype); // {"schema": "aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const Scalar & p); // {"schema": "aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, ScalarType dtype); // {"schema": "aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim); // {"schema": "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor norm(const Tensor & self, const c10::optional & p, DimnameList dim, bool keepdim, ScalarType dtype); // {"schema": "aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"} +Tensor norm(const Tensor & self, const c10::optional & p, DimnameList dim, bool keepdim); // {"schema": "aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, DimnameList dim, bool keepdim, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, DimnameList dim, bool keepdim, Tensor & out); // {"schema": "aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple frexp(const Tensor & self); // {"schema": "aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)", "dispatch": "True", "default": "True"} +::std::tuple frexp_out(const Tensor & self, Tensor & mantissa, Tensor & exponent); // {"schema": "aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)", "dispatch": "True", "default": "False"} +Tensor frobenius_norm(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & frobenius_norm_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nuclear_norm(const Tensor & self, bool keepdim); // {"schema": "aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nuclear_norm_out(const Tensor & self, bool keepdim, Tensor & out); // {"schema": "aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nuclear_norm(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nuclear_norm_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor clone(const Tensor & self, c10::optional memory_format); // {"schema": "aten::clone(Tensor self, *, MemoryFormat? 
memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor positive(const Tensor & self); // {"schema": "aten::positive(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +const Tensor & resize_as_(const Tensor & self, const Tensor & the_template, c10::optional memory_format); // {"schema": "aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & resize_as_sparse_(const Tensor & self, const Tensor & the_template); // {"schema": "aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & zero_(Tensor & self); // {"schema": "aten::zero_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & sub_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sub(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sub_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor sub(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sub_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & subtract_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor subtract(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & subtract_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor subtract(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & subtract_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor rsub(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & heaviside_out(const Tensor & self, const Tensor & values, Tensor & out); // {"schema": "aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor heaviside(const Tensor & self, const Tensor & values); // {"schema": "aten::heaviside(Tensor self, Tensor values) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & heaviside_(Tensor & self, const Tensor & values); // {"schema": "aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor rsub(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _sparse_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sparse_sampled_addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sparse_sampled_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _sparse_mm_reduce_impl(const Tensor & self, const Tensor & other, c10::string_view reduce); // {"schema": "aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _sparse_mm_reduce_impl_backward(const Tensor & self, const Tensor & grad_out, const Tensor & weight, c10::string_view reduce, const Tensor & arg_out, ::std::array output_mask); // {"schema": "aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _addmm_activation_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, bool use_gelu, Tensor & out); // {"schema": "aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _addmm_activation(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, bool use_gelu); // {"schema": "aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple _scaled_mm(const Tensor & self, const Tensor & mat2, const c10::optional & bias, c10::optional out_dtype, const c10::optional & scale_a, const c10::optional & scale_b, const c10::optional & scale_result, bool use_fast_accum); // {"schema": "aten::_scaled_mm(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_mm_out(const Tensor & self, const Tensor & mat2, const c10::optional & bias, c10::optional out_dtype, const c10::optional & scale_a, const c10::optional & scale_b, const c10::optional & scale_result, bool use_fast_accum, Tensor & out, Tensor & out_amax); // {"schema": "aten::_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +Tensor sparse_compressed_tensor(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sparse_csr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_csc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_bsr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_bsc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_compressed_tensor(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sparse_csr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_csc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_bsr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_bsc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_compressed_tensor_unsafe(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_csr_tensor_unsafe(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_csc_tensor_unsafe(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_bsr_tensor_unsafe(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_bsc_tensor_unsafe(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_coo_tensor(IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced); // {"schema": "aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced); // {"schema": "aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced); // {"schema": "aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor", "dispatch": "False", "default": "True"} +void _validate_sparse_coo_tensor_args(const Tensor & indices, const Tensor & values, IntArrayRef size, c10::optional is_coalesced); // {"schema": "aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_compressed_tensor_args(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, IntArrayRef size, Layout layout); // {"schema": "aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_csr_tensor_args(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_csc_tensor_args(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_bsr_tensor_args(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"} +void _validate_sparse_bsc_tensor_args(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"} +Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const Tensor & indices, const Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced); // {"schema": "aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? 
is_coalesced=None) -> Tensor", "dispatch": "True", "default": "False"} +const Tensor & sparse_resize_(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)", "dispatch": "True", "default": "False"} +const Tensor & sparse_resize_and_clear_(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sparse_mask(const Tensor & self, const Tensor & mask); // {"schema": "aten::sparse_mask(Tensor self, Tensor mask) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_mask_projection(const Tensor & self, const Tensor & mask, bool accumulate_matches); // {"schema": "aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector _to_cpu(TensorList tensors); // {"schema": "aten::_to_cpu(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor to_dense(const Tensor & self, c10::optional dtype, c10::optional masked_grad); // {"schema": "aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_dense(const Tensor & self, c10::optional dtype, c10::optional masked_grad); // {"schema": "aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_dense_backward(const Tensor & grad, const Tensor & input, c10::optional masked_grad); // {"schema": "aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor", "dispatch": "False", "default": "True"} +int64_t sparse_dim(const Tensor & self); // {"schema": "aten::sparse_dim(Tensor self) -> int", "dispatch": "True", "default": "False"} +int64_t _dimI(const Tensor & self); // {"schema": "aten::_dimI(Tensor self) -> int", "dispatch": "True", "default": "False"} +int64_t dense_dim(const Tensor & self); // {"schema": "aten::dense_dim(Tensor self) -> int", "dispatch": "True", "default": "False"} +int64_t _dimV(const Tensor & self); // {"schema": "aten::_dimV(Tensor self) -> int", "dispatch": "True", "default": "False"} +int64_t _nnz(const Tensor & self); // {"schema": "aten::_nnz(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor coalesce(const Tensor & self); // {"schema": "aten::coalesce(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _coalesce(const Tensor & self); // {"schema": "aten::_coalesce(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +bool is_coalesced(const Tensor & self); // {"schema": "aten::is_coalesced(Tensor self) -> bool", "dispatch": "True", "default": "True"} +Tensor _indices(const Tensor & self); // {"schema": "aten::_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _values(const Tensor & self); // {"schema": "aten::_values(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor & _coalesced_(Tensor & self, bool coalesced); // {"schema": "aten::_coalesced_(Tensor(a!) 
self, bool coalesced) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor indices(const Tensor & self); // {"schema": "aten::indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor values(const Tensor & self); // {"schema": "aten::values(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor crow_indices(const Tensor & self); // {"schema": "aten::crow_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor col_indices(const Tensor & self); // {"schema": "aten::col_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor ccol_indices(const Tensor & self); // {"schema": "aten::ccol_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor row_indices(const Tensor & self); // {"schema": "aten::row_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor & hspmm_out(const Tensor & mat1, const Tensor & mat2, Tensor & out); // {"schema": "aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor hspmm(const Tensor & mat1, const Tensor & mat2); // {"schema": "aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & copy_sparse_to_sparse_(Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+::std::vector<Tensor> unbind(const Tensor & self, int64_t dim); // {"schema": "aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"}
+::std::vector<Tensor> unbind(const Tensor & self, Dimname dim); // {"schema": "aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]", "dispatch": "False", "default": "True"}
+Tensor to_sparse(const Tensor & self, int64_t sparse_dim); // {"schema": "aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _to_sparse(const Tensor & self, int64_t sparse_dim); // {"schema": "aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_sparse(const Tensor & self, c10::optional<Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim); // {"schema": "aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _to_sparse(const Tensor & self, c10::optional<Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim); // {"schema": "aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_sparse_csr(const Tensor & self, c10::optional<int64_t> dense_dim); // {"schema": "aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _to_sparse_csr(const Tensor & self, c10::optional<int64_t> dense_dim); // {"schema": "aten::_to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_sparse_csc(const Tensor & self, c10::optional<int64_t> dense_dim); // {"schema": "aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"}
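+// NOTE [editorial sketch]: to_sparse_csr and the crow_indices/col_indices/values
+// accessors above round-trip a strided tensor through the CSR layout. A minimal
+// illustrative use, assuming the public ATen API (at::randn is assumed here, it
+// is not declared by this file):
+//
+//   at::Tensor dense = at::randn({4, 4});
+//   at::Tensor csr   = dense.to_sparse_csr();
+//   at::Tensor crow  = csr.crow_indices();  // row pointers, length rows + 1
+//   at::Tensor col   = csr.col_indices();   // one column index per stored value
+//   at::Tensor back  = csr.to_dense();      // recovers the original values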
dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_sparse_bsr(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_sparse_bsr(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_sparse_bsc(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _to_sparse_bsc(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim); // {"schema": "aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _to_sparse_semi_structured(const Tensor & dense); // {"schema": "aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor to_mkldnn(const Tensor & self, c10::optional dtype); // {"schema": "aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_reorder_conv2d_weight(const Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, OptionalSymIntArrayRef input_size); // {"schema": "aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_reorder_conv3d_weight(const Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); // {"schema": "aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor to_mkldnn_backward(const Tensor & grad, const Tensor & input); // {"schema": "aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantize_per_tensor_dynamic(const Tensor & self, ScalarType dtype, bool reduce_range); // {"schema": "aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantize_per_tensor(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype); // {"schema": "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantize_per_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, ScalarType dtype); // {"schema": "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector quantize_per_tensor(TensorList tensors, const Tensor & scales, const Tensor & zero_points, ScalarType dtype); // {"schema": "aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]", "dispatch": "True", "default": "False"} +Tensor quantize_per_channel(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType 
dtype); // {"schema": "aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"} +Tensor dequantize(const Tensor & self); // {"schema": "aten::dequantize.self(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector dequantize(TensorList tensors); // {"schema": "aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "True", "default": "False"} +double q_scale(const Tensor & self); // {"schema": "aten::q_scale(Tensor self) -> float", "dispatch": "True", "default": "False"} +int64_t q_zero_point(const Tensor & self); // {"schema": "aten::q_zero_point(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor q_per_channel_scales(const Tensor & self); // {"schema": "aten::q_per_channel_scales(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor q_per_channel_zero_points(const Tensor & self); // {"schema": "aten::q_per_channel_zero_points(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +int64_t q_per_channel_axis(const Tensor & self); // {"schema": "aten::q_per_channel_axis(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor int_repr(const Tensor & self); // {"schema": "aten::int_repr(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _make_per_tensor_quantized_tensor(const Tensor & self, double scale, int64_t zero_point); // {"schema": "aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _make_per_channel_quantized_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis); // {"schema": "aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor", "dispatch": "True", "default": "False"} +QScheme qscheme(const Tensor & self); // {"schema": "aten::qscheme(Tensor self) -> QScheme", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_tensor_affine(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fake_quantize_per_tensor_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fake_quantize_per_tensor_affine_cachemask(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const Tensor & self, const Tensor & scale, const Tensor & zero_point, const Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max); // {"schema": "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +Tensor 
fake_quantize_per_tensor_affine_cachemask_backward(const Tensor & grad, const Tensor & mask); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _fake_quantize_learnable_per_tensor_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_learnable_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fake_quantize_per_channel_affine_cachemask(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_channel_affine_cachemask_backward(const Tensor & grad, const Tensor & mask); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _fake_quantize_learnable_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_learnable_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor fused_moving_avg_obs_fake_quant(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) 
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _fused_moving_avg_obs_fq_helper(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +::std::tuple _choose_qparams_per_tensor(const Tensor & self, bool reduce_range); // {"schema": "aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)", "dispatch": "False", "default": "True"} +Tensor _saturate_weight_to_fp16(const Tensor & weight); // {"schema": "aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple choose_qparams_optimized(const Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width); // {"schema": "aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor _autocast_to_reduced_precision(const Tensor & self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype); // {"schema": "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _autocast_to_full_precision(const Tensor & self, bool cuda_enabled, bool cpu_enabled); // {"schema": "aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _to_copy(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, c10::optional memory_format); // {"schema": "aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor to(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, const Tensor & other, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +::std::vector meshgrid(TensorList tensors); // {"schema": "aten::meshgrid(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector meshgrid(TensorList tensors, c10::string_view indexing); // {"schema": "aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor cartesian_prod(TensorList tensors); // {"schema": "aten::cartesian_prod(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor combinations(const Tensor & self, int64_t r, bool with_replacement); // {"schema": "aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor", "dispatch": "False", "default": "True"} +Scalar item(const Tensor & self); // {"schema": "aten::item(Tensor self) -> Scalar", "dispatch": "False", "default": "True"} +ScalarType result_type(const Tensor & tensor, const Tensor & other); // {"schema": "aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Tensor & tensor, const Scalar & other); // {"schema": "aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Scalar & scalar, const Tensor & tensor); // {"schema": "aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Scalar & scalar1, const Scalar & scalar2); // {"schema": "aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType", "dispatch": "False", "default": "True"} +bool can_cast(ScalarType from, ScalarType to); // {"schema": "aten::can_cast(ScalarType from, ScalarType to) -> bool", "dispatch": "False", "default": "True"} +ScalarType promote_types(ScalarType type1, ScalarType type2); // {"schema": "aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType", "dispatch": "False", "default": "True"} +Scalar _local_scalar_dense(const Tensor & self); // {"schema": "aten::_local_scalar_dense(Tensor self) -> Scalar", "dispatch": "True", "default": "False"} +::std::tuple _lstm_mps(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple,::std::vector> lstm_mps_backward(const c10::optional & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & z_state, const Tensor & cell_state_fwd, const Tensor & input, const Tensor & layersOutputs, TensorList hx, TensorList 
params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell_backward_impl(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell_backward(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _thnn_differentiable_lstm_cell_backward(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & input_gates, const Tensor & hidden_gates, const c10::optional & input_bias, const c10::optional & hidden_bias, const Tensor & cx, const Tensor & cy); // {"schema": "aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _thnn_fused_gru_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_gru_cell_backward(const Tensor & grad_hy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_differentiable_gru_cell_backward(const Tensor & grad_hy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? 
hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm(const Tensor & data, const Tensor & batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_tanh(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_tanh(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_relu(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_relu(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm_cell(const Tensor & 
input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple quantized_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor quantized_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const 
Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _pack_padded_sequence(const Tensor & input, const Tensor & lengths, bool batch_first); // {"schema": "aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor _pack_padded_sequence_backward(const Tensor & grad, c10::SymIntArrayRef input_size, const Tensor & batch_sizes, bool batch_first); // {"schema": "aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _pad_packed_sequence(const Tensor & data, const Tensor & batch_sizes, bool batch_first, const Scalar & padding_value, int64_t total_length); // {"schema": "aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor & set_(Tensor & self, Storage source); // {"schema": "aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self, const Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & set_(Tensor & self, const Tensor & source); // {"schema": "aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self); // {"schema": "aten::set_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lift(const Tensor & self); // {"schema": "aten::lift(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor lift_fresh(const Tensor & self); // {"schema": "aten::lift_fresh(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor lift_fresh_copy(const Tensor & self); // {"schema": "aten::lift_fresh_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +bool is_set_to(const Tensor & self, const Tensor & tensor); // {"schema": "aten::is_set_to(Tensor self, Tensor tensor) -> bool", "dispatch": "True", "default": "False"} +Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Scalar & value); // {"schema": "aten::masked_fill_.Scalar(Tensor(a!) 
self, Tensor mask, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_fill(const Tensor & self, const Tensor & mask, const Scalar & value); // {"schema": "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value); // {"schema": "aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_fill(const Tensor & self, const Tensor & mask, const Tensor & value); // {"schema": "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source); // {"schema": "aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_scatter(const Tensor & self, const Tensor & mask, const Tensor & source); // {"schema": "aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor masked_scatter_backward(const Tensor & grad_output, const Tensor & mask, c10::SymIntArrayRef sizes); // {"schema": "aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _masked_softmax(const Tensor & self, const Tensor & mask, c10::optional dim, c10::optional mask_type); // {"schema": "aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _masked_softmax_backward(const Tensor & grad_output, const Tensor & output, const Tensor & mask, c10::optional dim); // {"schema": "aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor view(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor view(const Tensor & self, ScalarType dtype); // {"schema": "aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate); // {"schema": "aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor put(const Tensor & self, const Tensor & index, const Tensor & source, bool accumulate); // {"schema": "aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_add_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha, Tensor & out); // {"schema": "aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add_(Tensor(a!) 
self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor index_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & index_reduce_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self, Tensor & out); // {"schema": "aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & index_reduce_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self); // {"schema": "aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_reduce(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self); // {"schema": "aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_fill_(Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & index_fill_(Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill_.Dimname_Tensor(Tensor(a!) 
self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", "dispatch": "False", "default": "True"} +Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor", "dispatch": "False", "default": "True"} +Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, Tensor & out); // {"schema": "aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, Tensor & out); // {"schema": "aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce); // {"schema": "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce); // {"schema": "aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, Tensor & out); // {"schema": "aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce); // {"schema": "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce); // {"schema": "aten::scatter_.value_reduce(Tensor(a!) 
self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce, Tensor & out); // {"schema": "aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", "dispatch": "False", "default": "True"} +Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", "dispatch": "False", "default": "True"} +Tensor scatter_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_add_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, Tensor & out); // {"schema": "aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor scatter_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", "dispatch": "False", "default": "True"} +Tensor scatter_reduce(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self); // {"schema": "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & scatter_reduce_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self); // {"schema": "aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scatter_reduce_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self, Tensor & out); // {"schema": "aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & eq_(Tensor & self, const Scalar & other); // {"schema": "aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & eq_(Tensor & self, const Tensor & other); // {"schema": "aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_and_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __and__(const Tensor & self, const Scalar & other); // {"schema": "aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __and__(const Tensor & self, const Tensor & other); // {"schema": "aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __iand__(Tensor & self, const Scalar & other); // {"schema": "aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __iand__(Tensor & self, const Tensor & other); // {"schema": "aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_or_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_or_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_or(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_or(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_or(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_or_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_or_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_or_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __or__(const Tensor & self, const Scalar & other); // {"schema": "aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __or__(const Tensor & self, const Tensor & other); // {"schema": "aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __ior__(Tensor & self, const Scalar & other); // {"schema": "aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __ior__(Tensor & self, const Tensor & other); // {"schema": "aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_xor_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_xor_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_xor(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_xor(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_xor(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_xor_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_xor_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __xor__(const Tensor & self, const Scalar & other); // {"schema": "aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __xor__(const Tensor & self, const Tensor & other); // {"schema": "aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __ixor__(Tensor & self, const Scalar & other); // {"schema": "aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __ixor__(Tensor & self, const Tensor & other); // {"schema": "aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor __lshift__(const Tensor & self, const Scalar & other); // {"schema": "aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor __lshift__(const Tensor & self, const Tensor & other); // {"schema": "aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & __ilshift__(Tensor & self, const Scalar & other); // {"schema": "aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & __ilshift__(Tensor & self, const Tensor & other); // {"schema": "aten::__ilshift__.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_left_shift(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_left_shift(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_left_shift(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor __rshift__(const Tensor & self, const Scalar & other); // {"schema": "aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor __rshift__(const Tensor & self, const Tensor & other); // {"schema": "aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & __irshift__(Tensor & self, const Scalar & other); // {"schema": "aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & __irshift__(Tensor & self, const Tensor & other); // {"schema": "aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_right_shift(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_right_shift(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_right_shift(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tril_(Tensor & self, int64_t diagonal); // {"schema": "aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & triu_(Tensor & self, int64_t diagonal); // {"schema": "aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & digamma_(Tensor & self); // {"schema": "aten::digamma_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lerp_(Tensor & self, const Tensor & end, const Scalar & weight); // {"schema": "aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lerp_(Tensor & self, const Tensor & end, const Tensor & weight); // {"schema": "aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & addbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, int64_t from, c10::optional to, c10::optional generator); // {"schema": "aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, int64_t to, c10::optional generator); // {"schema": "aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, c10::optional generator); // {"schema": "aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & uniform_(Tensor & self, double from, double to, c10::optional generator); // {"schema": "aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & cauchy_(Tensor & self, double median, double sigma, c10::optional generator); // {"schema": "aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? 
generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & log_normal_(Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & exponential_(Tensor & self, double lambd, c10::optional generator); // {"schema": "aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & geometric_(Tensor & self, double p, c10::optional generator); // {"schema": "aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & diag_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor diag(const Tensor & self, int64_t diagonal); // {"schema": "aten::diag(Tensor self, int diagonal=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cross_out(const Tensor & self, const Tensor & other, c10::optional dim, Tensor & out); // {"schema": "aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cross(const Tensor & self, const Tensor & other, c10::optional dim); // {"schema": "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & triu_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor triu(const Tensor & self, int64_t diagonal); // {"schema": "aten::triu(Tensor self, int diagonal=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tril_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tril(const Tensor & self, int64_t diagonal); // {"schema": "aten::tril(Tensor self, int diagonal=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor trace(const Tensor & self); // {"schema": "aten::trace(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor trace_backward(const Tensor & grad, c10::SymIntArrayRef sizes); // {"schema": "aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & ne_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ne(const Tensor & self, const Scalar & other); // {"schema": "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ne_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ne(const Tensor & self, const Tensor & other); // {"schema": "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ne_(Tensor & self, const Scalar & other); // {"schema": "aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ne_(Tensor & self, const Tensor & other); // {"schema": "aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & not_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor not_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & not_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor not_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & not_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & not_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & eq_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor eq(const Tensor & self, const Scalar & other); // {"schema": "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & eq_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor eq(const Tensor & self, const Tensor & other); // {"schema": "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ge(const Tensor & self, const Scalar & other); // {"schema": "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ge(const Tensor & self, const Tensor & other); // {"schema": "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_(Tensor & self, const Scalar & other); // {"schema": "aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ge_(Tensor & self, const Tensor & other); // {"schema": "aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & greater_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & greater_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & le_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor le(const Tensor & self, const Scalar & other); // {"schema": "aten::le.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & le_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor le(const Tensor & self, const Tensor & other); // {"schema": "aten::le.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & le_(Tensor & self, const Scalar & other); // {"schema": "aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & le_(Tensor & self, const Tensor & other); // {"schema": "aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & less_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & less_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & gt_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gt(const Tensor & self, const Scalar & other); // {"schema": "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gt_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gt(const Tensor & self, const Tensor & other); // {"schema": "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gt_(Tensor & self, const Scalar & other); // {"schema": "aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & gt_(Tensor & self, const Tensor & other); // {"schema": "aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & greater_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater(const Tensor & self, const Scalar & other); // {"schema": "aten::greater.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater(const Tensor & self, const Tensor & other); // {"schema": "aten::greater.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_(Tensor & self, const Scalar & other); // {"schema": "aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & greater_(Tensor & self, const Tensor & other); // {"schema": "aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & lt_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lt(const Tensor & self, const Scalar & other); // {"schema": "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lt_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lt(const Tensor & self, const Tensor & other); // {"schema": "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lt_(Tensor & self, const Scalar & other); // {"schema": "aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lt_(Tensor & self, const Tensor & other); // {"schema": "aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & less_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less(const Tensor & self, const Scalar & other); // {"schema": "aten::less.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less(const Tensor & self, const Tensor & other); // {"schema": "aten::less.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_(Tensor & self, const Scalar & other); // {"schema": "aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & less_(Tensor & self, const Tensor & other); // {"schema": "aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & take_out(const Tensor & self, const Tensor & index, Tensor & out); // {"schema": "aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor take(const Tensor & self, const Tensor & index); // {"schema": "aten::take(Tensor self, Tensor index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & take_along_dim_out(const Tensor & self, const Tensor & indices, c10::optional dim, Tensor & out); // {"schema": "aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor take_along_dim(const Tensor & self, const Tensor & indices, c10::optional dim); // {"schema": "aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & index_select_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & out); // {"schema": "aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index); // {"schema": "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & index_select_out(const Tensor & self, Dimname dim, const Tensor & index, Tensor & out); // {"schema": "aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor index_select(const Tensor & self, Dimname dim, const Tensor & index); // {"schema": "aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor", "dispatch": "False", "default": "True"} +Tensor index_select_backward(const Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const Tensor & index); // {"schema": "aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & masked_select_out(const Tensor & self, const Tensor & mask, Tensor & out); // {"schema": "aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_select(const Tensor & self, const Tensor & mask); // {"schema": "aten::masked_select(Tensor self, Tensor mask) -> Tensor", "dispatch": "True", "default": "False"} +Tensor masked_select_backward(const Tensor & grad, const Tensor & input, const Tensor & mask); // {"schema": "aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nonzero_out(const Tensor & self, Tensor & out); // {"schema": "aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nonzero(const Tensor & self); // {"schema": "aten::nonzero(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & nonzero_static_out(const Tensor & self, int64_t size, int64_t fill_value, Tensor & out); // {"schema": "aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nonzero_static(const Tensor & self, int64_t size, int64_t fill_value); // {"schema": "aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector nonzero_numpy(const Tensor & self); // {"schema": "aten::nonzero_numpy(Tensor self) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor argwhere(const Tensor & self); // {"schema": "aten::argwhere(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & gather_out(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad, Tensor & out); // {"schema": "aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor gather_backward(const Tensor & grad, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & gather_out(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad, Tensor & out); // {"schema": "aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor gather(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _gather_sparse_backward(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & grad); // {"schema": "aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & addcmul_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value, Tensor & out); // {"schema": "aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addcdiv_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value, Tensor & out); // {"schema": "aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor cross_entropy_loss(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing); // {"schema": "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple triangular_solve_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular, Tensor & X, Tensor & M); // {"schema": "aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) 
cloned_coefficient)", "dispatch": "True", "default": "False"} +::std::tuple triangular_solve(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular); // {"schema": "aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)", "dispatch": "True", "default": "True"} +void _linalg_check_errors(const Tensor & info, c10::string_view api_name, bool is_matrix); // {"schema": "aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()", "dispatch": "True", "default": "True"} +Tensor & linalg_solve_triangular_out(const Tensor & self, const Tensor & B, bool upper, bool left, bool unitriangular, Tensor & out); // {"schema": "aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linalg_solve_triangular(const Tensor & self, const Tensor & B, bool upper, bool left, bool unitriangular); // {"schema": "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor linalg_vander(const Tensor & x, c10::optional N); // {"schema": "aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple svd_out(const Tensor & self, bool some, bool compute_uv, Tensor & U, Tensor & S, Tensor & V); // {"schema": "aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)", "dispatch": "False", "default": "True"} +::std::tuple svd(const Tensor & self, bool some, bool compute_uv); // {"schema": "aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)", "dispatch": "False", "default": "True"} +Tensor swapaxes(const Tensor & self, int64_t axis0, int64_t axis1); // {"schema": "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor & swapaxes_(Tensor & self, int64_t axis0, int64_t axis1); // {"schema": "aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor swapdims(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor & swapdims_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cholesky_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cholesky(const Tensor & self, bool upper); // {"schema": "aten::cholesky(Tensor self, bool upper=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & cholesky_solve_out(const Tensor & self, const Tensor & input2, bool upper, Tensor & out); // {"schema": "aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor cholesky_solve(const Tensor & self, const Tensor & input2, bool upper); // {"schema": "aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper); // {"schema": "aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cholesky_inverse(const Tensor & self, bool upper); // {"schema": "aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & cholesky_inverse_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple qr_out(const Tensor & self, bool some, Tensor & Q, Tensor & R); // {"schema": "aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)", "dispatch": "False", "default": "True"} +::std::tuple qr(const Tensor & self, bool some); // {"schema": "aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)", "dispatch": "False", "default": "True"} +::std::tuple geqrf_out(const Tensor & self, Tensor & a, Tensor & tau); // {"schema": "aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)", "dispatch": "True", "default": "False"} +::std::tuple geqrf(const Tensor & self); // {"schema": "aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)", "dispatch": "True", "default": "False"} +Tensor orgqr(const Tensor & self, const Tensor & input2); // {"schema": "aten::orgqr(Tensor self, Tensor input2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & orgqr_out(const Tensor & self, const Tensor & input2, Tensor & out); // {"schema": "aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose, Tensor & out); // {"schema": "aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose); // {"schema": "aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _lu_with_info(const Tensor & self, bool pivot, bool check_errors); // {"schema": "aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)", "dispatch": "False", "default": "True"} +Tensor & lu_solve_out(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots, Tensor & out); // {"schema": "aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor lu_solve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots); // {"schema": "aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple lu_unpack(const Tensor & LU_data, const Tensor & LU_pivots, bool unpack_data, bool unpack_pivots); // {"schema": "aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)", "dispatch": "True", "default": "True"} +::std::tuple lu_unpack_out(const Tensor & LU_data, const Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, Tensor & P, Tensor & L, Tensor & U); // {"schema": "aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)", "dispatch": "True", "default": "False"} +Tensor & multinomial_out(const Tensor & self, int64_t num_samples, bool replacement, c10::optional generator, Tensor & out); // {"schema": "aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement, c10::optional generator); // {"schema": "aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & lgamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & lgamma_(Tensor & self); // {"schema": "aten::lgamma_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor lgamma(const Tensor & self); // {"schema": "aten::lgamma(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & digamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor digamma(const Tensor & self); // {"schema": "aten::digamma(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polygamma_out(int64_t n, const Tensor & self, Tensor & out); // {"schema": "aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor polygamma(int64_t n, const Tensor & self); // {"schema": "aten::polygamma(int n, Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polygamma_(Tensor & self, int64_t n); // {"schema": "aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor erfinv(const Tensor & self); // {"schema": "aten::erfinv(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erfinv_(Tensor & self); // {"schema": "aten::erfinv_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erfinv_out(const Tensor & self, Tensor & out); // {"schema": "aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor i0(const Tensor & self); // {"schema": "aten::i0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & i0_(Tensor & self); // {"schema": "aten::i0_(Tensor(a!) 
self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sign(const Tensor & self); // {"schema": "aten::sign(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sign_(Tensor & self); // {"schema": "aten::sign_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sign_out(const Tensor & self, Tensor & out); // {"schema": "aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor signbit(const Tensor & self); // {"schema": "aten::signbit(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & signbit_out(const Tensor & self, Tensor & out); // {"schema": "aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor dist(const Tensor & self, const Tensor & other, const Scalar & p); // {"schema": "aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atan2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & atan2_(Tensor & self, const Tensor & other); // {"schema": "aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor atan2(const Tensor & self, const Tensor & other); // {"schema": "aten::atan2(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arctan2(const Tensor & self, const Tensor & other); // {"schema": "aten::arctan2(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctan2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctan2_(Tensor & self, const Tensor & other); // {"schema": "aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & lerp_out(const Tensor & self, const Tensor & end, const Scalar & weight, Tensor & out); // {"schema": "aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & lerp_out(const Tensor & self, const Tensor & end, const Tensor & weight, Tensor & out); // {"schema": "aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lerp(const Tensor & self, const Tensor & end, const Scalar & weight); // {"schema": "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor lerp(const Tensor & self, const Tensor & end, const Tensor & weight); // {"schema": "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & histc_out(const Tensor & self, int64_t bins, const Scalar & min, const Scalar & max, Tensor & out); // {"schema": "aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor histc(const Tensor & self, int64_t bins, const Scalar & min, const Scalar & max); // {"schema": "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple histogram_out(const Tensor & self, const Tensor & bins, const c10::optional & weight, bool density, Tensor & hist, Tensor & bin_edges); // {"schema": "aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram(const Tensor & self, const Tensor & bins, const c10::optional & weight, bool density); // {"schema": "aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram_out(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density, Tensor & hist, Tensor & bin_edges); // {"schema": "aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)", "dispatch": "True", "default": "False"} +::std::vector _histogramdd_bin_edges(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]", "dispatch": "True", "default": "False"} +Tensor _histogramdd_from_bin_cts(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _histogramdd_from_bin_tensors(const Tensor & self, TensorList bins, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple> histogramdd(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +::std::tuple> histogramdd(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +::std::tuple> histogramdd(const Tensor & self, TensorList bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +Tensor & fmod_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fmod(const Tensor & self, const Scalar & other); // {"schema": "aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmod_(Tensor & self, const Scalar & other); // {"schema": "aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fmod_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fmod(const Tensor & self, const Tensor & other); // {"schema": "aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmod_(Tensor & self, const Tensor & other); // {"schema": "aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hypot_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hypot(const Tensor & self, const Tensor & other); // {"schema": "aten::hypot(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hypot_(Tensor & self, const Tensor & other); // {"schema": "aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & igamma_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor igamma(const Tensor & self, const Tensor & other); // {"schema": "aten::igamma(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & igamma_(Tensor & self, const Tensor & other); // {"schema": "aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & igammac_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor igammac(const Tensor & self, const Tensor & other); // {"schema": "aten::igammac(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & igammac_(Tensor & self, const Tensor & other); // {"schema": "aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nextafter_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nextafter(const Tensor & self, const Tensor & other); // {"schema": "aten::nextafter(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nextafter_(Tensor & self, const Tensor & other); // {"schema": "aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & remainder_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor remainder(const Tensor & self, const Scalar & other); // {"schema": "aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & remainder_(Tensor & self, const Scalar & other); // {"schema": "aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & remainder_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor remainder(const Tensor & self, const Tensor & other); // {"schema": "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & remainder_(Tensor & self, const Tensor & other); // {"schema": "aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor remainder(const Scalar & self, const Tensor & other); // {"schema": "aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor min(const Tensor & self); // {"schema": "aten::min(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & min_out(const Tensor & self, Tensor & out); // {"schema": "aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fmin(const Tensor & self, const Tensor & other); // {"schema": "aten::fmin(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmin_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max(const Tensor & self); // {"schema": "aten::max(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor fmax(const Tensor & self, const Tensor & other); // {"schema": "aten::fmax(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmax_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor maximum(const Tensor & self, const Tensor & other); // {"schema": "aten::maximum(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & maximum_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max(const Tensor & self, const Tensor & other); // {"schema": "aten::max.other(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & max_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & max_out(const Tensor & self, Tensor & out); // {"schema": "aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor minimum(const Tensor & self, const Tensor & other); // {"schema": "aten::minimum(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & minimum_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & min_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor min(const Tensor & self, const Tensor & other); // {"schema": "aten::min.other(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantile(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & quantile_out(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor quantile(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & quantile_out(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanquantile(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanquantile_out(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanquantile(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::nanquantile.scalar(Tensor self, float q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanquantile_out(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple sort_out(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple sort_out(const Tensor & self, c10::optional stable, int64_t dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple sort(const Tensor & self, int64_t dim, bool descending); // {"schema": "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple sort(const Tensor & self, c10::optional stable, int64_t dim, bool descending); // {"schema": "aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple sort_out(const Tensor & self, Dimname dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple sort_out(const Tensor & self, c10::optional stable, Dimname dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple sort(const Tensor & self, Dimname dim, bool descending); // {"schema": "aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple sort(const Tensor & self, c10::optional stable, Dimname dim, bool descending); // {"schema": "aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +Tensor & msort_out(const Tensor & self, Tensor & out); // {"schema": "aten::msort.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor msort(const Tensor & self); // {"schema": "aten::msort(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argsort(const Tensor & self, int64_t dim, bool descending); // {"schema": "aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argsort(const Tensor & self, bool stable, int64_t dim, bool descending); // {"schema": "aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor argsort(const Tensor & self, Dimname dim, bool descending); // {"schema": "aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple topk_out(const Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted, Tensor & values, Tensor & indices); // {"schema": "aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple topk(const Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted); // {"schema": "aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +Tensor all(const Tensor & self); // {"schema": "aten::all(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & all_out(const Tensor & self, Tensor & out); // {"schema": "aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor any(const Tensor & self); // {"schema": "aten::any(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & any_out(const Tensor & self, Tensor & out); // {"schema": "aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & renorm_out(const Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm, Tensor & out); // {"schema": "aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor renorm(const Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm); // {"schema": "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & renorm_(Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm); // {"schema": "aten::renorm_(Tensor(a!) 
self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step); // {"schema": "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor unfold_backward(const Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step); // {"schema": "aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor", "dispatch": "True", "default": "False"} +bool equal(const Tensor & self, const Tensor & other); // {"schema": "aten::equal(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "False"} +Tensor & pow_out(const Tensor & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor pow(const Tensor & self, const Tensor & exponent); // {"schema": "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & pow_out(const Scalar & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor pow(const Scalar & self, const Tensor & exponent); // {"schema": "aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & pow_out(const Tensor & self, const Scalar & exponent, Tensor & out); // {"schema": "aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor pow(const Tensor & self, const Scalar & exponent); // {"schema": "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & pow_(Tensor & self, const Scalar & exponent); // {"schema": "aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & pow_(Tensor & self, const Tensor & exponent); // {"schema": "aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & float_power_out(const Tensor & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor float_power(const Tensor & self, const Tensor & exponent); // {"schema": "aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & float_power_out(const Scalar & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor float_power(const Scalar & self, const Tensor & exponent); // {"schema": "aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & float_power_out(const Tensor & self, const Scalar & exponent, Tensor & out); // {"schema": "aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor float_power(const Tensor & self, const Scalar & exponent); // {"schema": "aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & float_power_(Tensor & self, const Scalar & exponent); // {"schema": "aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & float_power_(Tensor & self, const Tensor & exponent); // {"schema": "aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & normal_(Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal_functional(const Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & normal_out(const Tensor & mean, double std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(const Tensor & mean, double std, c10::optional generator); // {"schema": "aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & normal_out(double mean, const Tensor & std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(double mean, const Tensor & std, c10::optional generator); // {"schema": "aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & normal_out(const Tensor & mean, const Tensor & std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(const Tensor & mean, const Tensor & std, c10::optional generator); // {"schema": "aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor normal(double mean, double std, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & normal_out(double mean, double std, c10::SymIntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor alias(const Tensor & self); // {"schema": "aten::alias(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +void _amp_foreach_non_finite_check_and_unscale_(TensorList self, Tensor & found_inf, const Tensor & inv_scale); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()", "dispatch": "True", "default": "False"} +Tensor & _amp_update_scale_(Tensor & self, Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); // {"schema": "aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sub_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sub_(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": 
"True", "default": "False"} +void _foreach_sub_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, TensorList other); // {"schema": "aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, TensorList other); // {"schema": "aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, const Tensor & other); // {"schema": "aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, const Tensor & other); // {"schema": "aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, TensorList other); // {"schema": "aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, TensorList other); // {"schema": "aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, const Tensor & other); // {"schema": "aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, const Tensor & other); // {"schema": "aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_max(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_max_(TensorList self, const Scalar & scalar); // {"schema": 
"aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_max(TensorList self, TensorList other); // {"schema": "aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_max_(TensorList self, TensorList other); // {"schema": "aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_max(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_max_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_min(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_min_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_min(TensorList self, TensorList other); // {"schema": "aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_min_(TensorList self, TensorList other); // {"schema": "aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_clamp_min(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_clamp_min_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_maximum(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_maximum_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_maximum(TensorList self, TensorList other); // {"schema": "aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_maximum_(TensorList self, TensorList other); // {"schema": "aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_maximum(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_maximum_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_minimum(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_minimum_(TensorList self, const Scalar & 
scalar); // {"schema": "aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_minimum(TensorList self, TensorList other); // {"schema": "aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_minimum_(TensorList self, TensorList other); // {"schema": "aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_minimum(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_minimum_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcdiv(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcdiv(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcdiv(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars); // {"schema": "aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_addcdiv_(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcdiv_(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcdiv_(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars); // {"schema": "aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcmul(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcmul(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcmul(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars); // {"schema": "aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_addcmul_(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": 
"aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcmul_(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcmul_(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars); // {"schema": "aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_abs(TensorList self); // {"schema": "aten::_foreach_abs(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_abs_(TensorList self); // {"schema": "aten::_foreach_abs_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_acos(TensorList self); // {"schema": "aten::_foreach_acos(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_acos_(TensorList self); // {"schema": "aten::_foreach_acos_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_asin(TensorList self); // {"schema": "aten::_foreach_asin(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_asin_(TensorList self); // {"schema": "aten::_foreach_asin_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_atan(TensorList self); // {"schema": "aten::_foreach_atan(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_atan_(TensorList self); // {"schema": "aten::_foreach_atan_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_ceil(TensorList self); // {"schema": "aten::_foreach_ceil(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_ceil_(TensorList self); // {"schema": "aten::_foreach_ceil_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_cos(TensorList self); // {"schema": "aten::_foreach_cos(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_cos_(TensorList self); // {"schema": "aten::_foreach_cos_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_cosh(TensorList self); // {"schema": "aten::_foreach_cosh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_cosh_(TensorList self); // {"schema": "aten::_foreach_cosh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_erf(TensorList self); // {"schema": "aten::_foreach_erf(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_erf_(TensorList self); // {"schema": "aten::_foreach_erf_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_erfc(TensorList self); // {"schema": "aten::_foreach_erfc(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_erfc_(TensorList self); // {"schema": "aten::_foreach_erfc_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_exp(TensorList self); // {"schema": "aten::_foreach_exp(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_exp_(TensorList self); // {"schema": "aten::_foreach_exp_(Tensor(a!)[] 
self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_expm1(TensorList self); // {"schema": "aten::_foreach_expm1(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_expm1_(TensorList self); // {"schema": "aten::_foreach_expm1_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_floor(TensorList self); // {"schema": "aten::_foreach_floor(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_floor_(TensorList self); // {"schema": "aten::_foreach_floor_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_frac(TensorList self); // {"schema": "aten::_foreach_frac(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_frac_(TensorList self); // {"schema": "aten::_foreach_frac_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_lerp(TensorList self, TensorList tensors1, TensorList weights); // {"schema": "aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_lerp_(TensorList self, TensorList tensors1, TensorList weights); // {"schema": "aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_lerp(TensorList self, TensorList tensors1, const Scalar & weight); // {"schema": "aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_lerp_(TensorList self, TensorList tensors1, const Scalar & weight); // {"schema": "aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_lgamma(TensorList self); // {"schema": "aten::_foreach_lgamma(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_lgamma_(TensorList self); // {"schema": "aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log(TensorList self); // {"schema": "aten::_foreach_log(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log_(TensorList self); // {"schema": "aten::_foreach_log_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log10(TensorList self); // {"schema": "aten::_foreach_log10(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log10_(TensorList self); // {"schema": "aten::_foreach_log10_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log1p(TensorList self); // {"schema": "aten::_foreach_log1p(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log1p_(TensorList self); // {"schema": "aten::_foreach_log1p_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log2(TensorList self); // {"schema": "aten::_foreach_log2(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log2_(TensorList self); // {"schema": "aten::_foreach_log2_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_neg(TensorList self); // {"schema": "aten::_foreach_neg(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_neg_(TensorList self); // {"schema": 
"aten::_foreach_neg_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_norm(TensorList self, const Scalar & ord); // {"schema": "aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_pow(TensorList self, TensorList exponent); // {"schema": "aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_pow(TensorList self, const Scalar & exponent); // {"schema": "aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_pow(TensorList self, ArrayRef exponent); // {"schema": "aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_pow(const Scalar & self, TensorList exponent); // {"schema": "aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_pow_(TensorList self, TensorList exponent); // {"schema": "aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()", "dispatch": "True", "default": "False"} +void _foreach_pow_(TensorList self, const Scalar & exponent); // {"schema": "aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()", "dispatch": "True", "default": "False"} +void _foreach_pow_(TensorList self, ArrayRef exponent); // {"schema": "aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_reciprocal(TensorList self); // {"schema": "aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_reciprocal_(TensorList self); // {"schema": "aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_round(TensorList self); // {"schema": "aten::_foreach_round(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_round_(TensorList self); // {"schema": "aten::_foreach_round_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sigmoid(TensorList self); // {"schema": "aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sigmoid_(TensorList self); // {"schema": "aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sign(TensorList self); // {"schema": "aten::_foreach_sign(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sign_(TensorList self); // {"schema": "aten::_foreach_sign_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sin(TensorList self); // {"schema": "aten::_foreach_sin(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sin_(TensorList self); // {"schema": "aten::_foreach_sin_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sinh(TensorList self); // {"schema": "aten::_foreach_sinh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sinh_(TensorList self); // {"schema": "aten::_foreach_sinh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sqrt(TensorList self); // {"schema": "aten::_foreach_sqrt(Tensor[] self) -> Tensor[]", 
"dispatch": "True", "default": "False"} +void _foreach_sqrt_(TensorList self); // {"schema": "aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_tan(TensorList self); // {"schema": "aten::_foreach_tan(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_tan_(TensorList self); // {"schema": "aten::_foreach_tan_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_tanh(TensorList self); // {"schema": "aten::_foreach_tanh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_tanh_(TensorList self); // {"schema": "aten::_foreach_tanh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_trunc(TensorList self); // {"schema": "aten::_foreach_trunc(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_trunc_(TensorList self); // {"schema": "aten::_foreach_trunc_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +void _foreach_zero_(TensorList self); // {"schema": "aten::_foreach_zero_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +void _foreach_copy_(TensorList self, TensorList src, bool non_blocking); // {"schema": "aten::_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> ()", "dispatch": "True", "default": "False"} +Tensor bucketize(const Tensor & self, const Tensor & boundaries, bool out_int32, bool right); // {"schema": "aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & bucketize_out(const Tensor & self, const Tensor & boundaries, bool out_int32, bool right, Tensor & out); // {"schema": "aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bucketize(const Scalar & self, const Tensor & boundaries, bool out_int32, bool right); // {"schema": "aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor searchsorted(const Tensor & sorted_sequence, const Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); // {"schema": "aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & searchsorted_out(const Tensor & sorted_sequence, const Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, Tensor & out); // {"schema": "aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor searchsorted(const Tensor & sorted_sequence, const Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); // {"schema": "aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & searchsorted_out(const Tensor & sorted_sequence, const Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, Tensor & out); // {"schema": "aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _convert_indices_from_coo_to_csr(const Tensor & self, int64_t size, bool out_int32); // {"schema": "aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _convert_indices_from_coo_to_csr_out(const Tensor & self, int64_t size, bool out_int32, Tensor & out); // {"schema": "aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _convert_indices_from_csr_to_coo(const Tensor & crow_indices, const Tensor & col_indices, bool out_int32, bool transpose); // {"schema": "aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _convert_indices_from_csr_to_coo_out(const Tensor & crow_indices, const Tensor & col_indices, bool out_int32, bool transpose, Tensor & out); // {"schema": "aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & mse_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mse_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input); // {"schema": "aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", "dispatch": "True", "default": "False"} +Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multi_margin_loss_out(const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction, Tensor & out); // {"schema": "aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multi_margin_loss(const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction); // {"schema": "aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & multi_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction, Tensor & grad_input); // {"schema": "aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction); // {"schema": "aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & multilabel_margin_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple multilabel_margin_loss_forward_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & output, Tensor & is_target); // {"schema": "aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)", "dispatch": "True", "default": "False"} +Tensor & multilabel_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target, Tensor & grad_input); // {"schema": "aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target); // {"schema": "aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & nll_loss_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, Tensor & out); // {"schema": "aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nll_loss_nd(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"} +Tensor nll_loss(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple nll_loss_forward_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, Tensor & output, Tensor & total_weight); // {"schema": "aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple nll_loss_forward(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)", "dispatch": "True", "default": "True"} +Tensor & nll_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const Tensor & total_weight, Tensor & grad_input); // {"schema": "aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const Tensor & total_weight); // {"schema": "aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nll_loss2d_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, Tensor & out); // {"schema": "aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nll_loss2d(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple nll_loss2d_forward_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, Tensor & output, Tensor & total_weight); // {"schema": "aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple nll_loss2d_forward(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); // {"schema": "aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)", "dispatch": "True", "default": "False"} +Tensor & nll_loss2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const Tensor & total_weight, Tensor & grad_input); // {"schema": "aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const Tensor & total_weight); // {"schema": "aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & smooth_l1_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, double beta, Tensor & out); // {"schema": "aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction, double beta); // {"schema": "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & smooth_l1_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double beta, Tensor & grad_input); // {"schema": "aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double beta); // {"schema": "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & huber_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, double delta, Tensor & out); // {"schema": "aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor huber_loss(const Tensor & self, const Tensor & target, int64_t reduction, double delta); // {"schema": "aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & huber_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double delta, Tensor & grad_input); // {"schema": "aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor huber_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double delta); // {"schema": "aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & soft_margin_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & soft_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input); // {"schema": "aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_out(const Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, Tensor & out); // {"schema": "aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor elu(const Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale); // {"schema": "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_backward_out(const Tensor & grad_output, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, bool is_result, const Tensor & self_or_result, Tensor & grad_input); // {"schema": "aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor elu_backward(const Tensor & grad_output, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, bool is_result, const Tensor & self_or_result); // {"schema": "aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_(Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale); // {"schema": "aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor glu(const Tensor & self, int64_t dim); // {"schema": "aten::glu(Tensor self, int dim=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & glu_backward_out(const Tensor & grad_output, const Tensor & self, int64_t dim, Tensor & grad_input); // {"schema": "aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim); // {"schema": "aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor glu_jvp(const Tensor & glu, const Tensor & x, const Tensor & dx, int64_t dim); // {"schema": "aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor glu_backward_jvp(const Tensor & grad_x, const Tensor & grad_glu, const Tensor & x, const Tensor & dgrad_glu, const Tensor & dx, int64_t dim); // {"schema": "aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardsigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardsigmoid(const Tensor & self); // {"schema": "aten::hardsigmoid(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardsigmoid_(Tensor & self); // {"schema": "aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hardsigmoid_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardsigmoid_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardtanh_out(const Tensor & self, const Scalar & min_val, const Scalar & max_val, Tensor & out); // {"schema": "aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardtanh(const Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardtanh_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & min_val, const Scalar & max_val, Tensor & grad_input); // {"schema": "aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardtanh_(Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh_(Tensor(a!) 
self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & hardswish_out(const Tensor & self, Tensor & out); // {"schema": "aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardswish(const Tensor & self); // {"schema": "aten::hardswish(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardswish_(Tensor & self); // {"schema": "aten::hardswish_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardswish_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & leaky_relu_out(const Tensor & self, const Scalar & negative_slope, Tensor & out); // {"schema": "aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor leaky_relu(const Tensor & self, const Scalar & negative_slope); // {"schema": "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & leaky_relu_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & negative_slope, bool self_is_result, Tensor & grad_input); // {"schema": "aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, const Scalar & negative_slope, bool self_is_result); // {"schema": "aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & leaky_relu_(Tensor & self, const Scalar & negative_slope); // {"schema": "aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log_sigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor log_sigmoid(const Tensor & self); // {"schema": "aten::log_sigmoid(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple log_sigmoid_forward_out(const Tensor & self, Tensor & output, Tensor & buffer); // {"schema": "aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple log_sigmoid_forward(const Tensor & self); // {"schema": "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)", "dispatch": "True", "default": "False"} +Tensor & log_sigmoid_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & buffer, Tensor & grad_input); // {"schema": "aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer); // {"schema": "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & rrelu_with_noise_out(const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional generator, Tensor & out); // {"schema": "aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional generator); // {"schema": "aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, bool self_is_result); // {"schema": "aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional generator); // {"schema": "aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & softplus_out(const Tensor & self, const Scalar & beta, const Scalar & threshold, Tensor & out); // {"schema": "aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softplus(const Tensor & self, const Scalar & beta, const Scalar & threshold); // {"schema": "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softplus_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & beta, const Scalar & threshold, Tensor & grad_input); // {"schema": "aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, const Scalar & beta, const Scalar & threshold); // {"schema": "aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softshrink_out(const Tensor & self, const Scalar & lambd, Tensor & out); // {"schema": "aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softshrink(const Tensor & self, const Scalar & lambd); // {"schema": "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softshrink_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & lambd, Tensor & grad_input); // {"schema": "aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, const Scalar & lambd); // {"schema": "aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & adaptive_avg_pool2d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_avg_pool2d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", "dispatch": "False", "default": "True"} +Tensor mkldnn_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & mkldnn_adaptive_avg_pool2d_out(const Tensor & self, IntArrayRef output_size, Tensor & out); // {"schema": "aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mkldnn_adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool2d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & adaptive_avg_pool3d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_avg_pool3d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _adaptive_avg_pool3d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & adaptive_avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool2d_out(const Tensor & self, IntArrayRef output_size, Tensor & out, Tensor & indices); // {"schema": "aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & adaptive_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); // {"schema": "aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple adaptive_max_pool3d_out(const Tensor & self, IntArrayRef output_size, Tensor & out, Tensor & indices); // {"schema": "aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & adaptive_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); // {"schema": "aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & out); // {"schema": "aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & grad_input); // {"schema": "aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & out); // {"schema": "aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & grad_input); // {"schema": "aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple fractional_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples, Tensor & output, Tensor & indices); // {"schema": "aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple fractional_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); // {"schema": "aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & fractional_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); // {"schema": "aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple fractional_max_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples, Tensor & output, Tensor & indices); // {"schema": "aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); // {"schema": "aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & fractional_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); // {"schema": "aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple max_pool2d_with_indices_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out, Tensor & indices); // {"schema": "aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & max_pool2d_with_indices_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); // {"schema": "aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple max_pool3d_with_indices_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out, Tensor & indices); // {"schema": "aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & max_pool3d_with_indices_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); // {"schema": "aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & max_unpool2d_out(const Tensor & self, const Tensor & indices, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_unpool2d(const Tensor & self, const Tensor & indices, c10::SymIntArrayRef output_size); // {"schema": "aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & max_unpool3d_out(const Tensor & self, const Tensor & indices, c10::SymIntArrayRef output_size, IntArrayRef stride, IntArrayRef padding, Tensor & out); // {"schema": "aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_unpool3d(const Tensor & self, const Tensor & indices, c10::SymIntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); // {"schema": "aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & reflection_pad1d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad1d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reflection_pad1d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reflection_pad2d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad2d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & reflection_pad2d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & reflection_pad3d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad3d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reflection_pad3d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor reflection_pad3d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad1d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad1d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad1d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad2d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad2d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad2d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & replication_pad3d_out(const Tensor & self, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad3d(const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & replication_pad3d_backward_out(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, c10::SymIntArrayRef padding); // {"schema": "aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _pad_circular(const Tensor & self, c10::SymIntArrayRef pad); // {"schema": "aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _pad_enum(const Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional value); // {"schema": "aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor pad(const Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional value); // {"schema": "aten::pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_linear1d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_bilinear2d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_bilinear2d_aa(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_trilinear3d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_bicubic2d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_bicubic2d_aa(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); // {"schema": "aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_nearest1d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_nearest_exact1d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_nearest2d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_nearest_exact2d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor upsample_nearest3d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _upsample_nearest_exact3d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional> scale_factors); // {"schema": "aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & upsample_linear1d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales, Tensor & out); // {"schema": "aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_linear1d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales); // {"schema": "aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_linear1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales, Tensor & grad_input); // {"schema": "aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_linear1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales); // {"schema": "aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_bilinear2d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_bilinear2d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_bilinear2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_bilinear2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _upsample_bilinear2d_aa_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _upsample_bilinear2d_aa(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _upsample_bilinear2d_aa_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _upsample_bilinear2d_aa_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_bicubic2d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_bicubic2d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_bicubic2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_bicubic2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _upsample_bicubic2d_aa_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _upsample_bicubic2d_aa(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _upsample_bicubic2d_aa_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _upsample_bicubic2d_aa_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_trilinear3d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_trilinear3d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_trilinear3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_trilinear3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest1d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, Tensor & out); // {"schema": "aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact1d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, Tensor & out); // {"schema": "aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest1d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales); // {"schema": "aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact1d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales); // {"schema": "aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, Tensor & grad_input); // {"schema": "aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales); // {"schema": "aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales); // {"schema": "aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest2d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact2d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest2d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact2d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? 
scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest3d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact3d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & out); // {"schema": "aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest3d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact3d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & upsample_nearest3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _upsample_nearest_exact3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor upsample_nearest3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _upsample_nearest_exact3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); // {"schema": "aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sigmoid_backward_out(const Tensor & grad_output, const Tensor & output, Tensor & grad_input); // {"schema": "aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output); // {"schema": "aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logit_backward_out(const Tensor & grad_output, const Tensor & self, c10::optional eps, Tensor & grad_input); // {"schema": "aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logit_backward(const Tensor & grad_output, const Tensor & self, c10::optional eps); // {"schema": "aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tanh_backward_out(const Tensor & grad_output, const Tensor & output, Tensor & grad_input); // {"schema": "aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tanh_backward(const Tensor & grad_output, const Tensor & output); // {"schema": "aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & slow_conv_transpose2d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor slow_conv_transpose2d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation); // {"schema": "aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & slow_conv_transpose3d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor slow_conv_transpose3d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation); // {"schema": "aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & thnn_conv2d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); // {"schema": "aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & _slow_conv2d_forward_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & output); // {"schema": "aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _slow_conv2d_forward(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); // {"schema": "aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _slow_conv2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias); // {"schema": "aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "False"} +::std::tuple _slow_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask); // {"schema": "aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", "dispatch": "True", "default": "False"} +const Tensor & _conv_depthwise2d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, const Tensor & out); // {"schema": "aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _conv_depthwise2d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); // {"schema": "aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor", "dispatch": "True", "default": "False"} +Tensor conv_depthwise3d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); // {"schema": "aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & slow_conv3d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & out); // {"schema": "aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor slow_conv3d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); // {"schema": "aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & slow_conv3d_forward_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, Tensor & output); // {"schema": "aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor slow_conv3d_forward(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); // {"schema": "aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
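The _slow_conv2d_backward.output_mask overload shows the pattern used across these backward declarations: a bool[3] mask selects which of (grad_input, grad_weight, grad_bias) to materialize, so autograd can skip gradients nobody requested. A rough sketch of calling it directly, assuming the internal at::_slow_conv2d_backward wrapper is exposed with the argument order declared above; in normal use autograd invokes this for you.

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor input = at::randn({1, 2, 5, 5});
  at::Tensor weight = at::randn({3, 2, 3, 3});
  at::Tensor grad_output = at::ones({1, 3, 3, 3});  // conv output shape for stride 1, no padding
  // Ask only for grad_input and grad_weight; grad_bias is skipped via the mask.
  auto [grad_input, grad_weight, grad_bias] = at::_slow_conv2d_backward(
      grad_output, input, weight,
      /*kernel_size=*/{3, 3}, /*stride=*/{1, 1}, /*padding=*/{0, 0},
      /*output_mask=*/{true, true, false});
  return 0;
}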
bias, SymInt[3] stride, SymInt[3] padding) -> Tensor", "dispatch": "True", "default": "False"} +Tensor slow_conv_dilated2d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); // {"schema": "aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor slow_conv_dilated3d(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); // {"schema": "aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & col2im_out(const Tensor & self, c10::SymIntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & out); // {"schema": "aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor col2im(const Tensor & self, c10::SymIntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); // {"schema": "aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", "dispatch": "True", "default": "False"} +Tensor column_stack(TensorList tensors); // {"schema": "aten::column_stack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & column_stack_out(TensorList tensors, Tensor & out); // {"schema": "aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & im2col_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & out); // {"schema": "aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); // {"schema": "aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", "dispatch": "True", "default": "False"} +Tensor isfinite(const Tensor & self); // {"schema": "aten::isfinite(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor isinf(const Tensor & self); // {"schema": "aten::isinf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +void record_stream(Tensor & self, Stream s); // {"schema": "aten::record_stream(Tensor(a!) self, Stream s) -> ()", "dispatch": "True", "default": "False"} +Tensor isposinf(const Tensor & self); // {"schema": "aten::isposinf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isposinf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isposinf.out(Tensor self, *, Tensor(a!) 
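im2col and col2im are the unfold/fold primitives behind the slow convolution paths above: im2col lays out every sliding window as a column, and col2im scatters the columns back, summing overlapping contributions. A minimal round-trip sketch, assuming the at::im2col / at::col2im wrappers with the argument order given in the schemas (kernel_size, dilation, padding, stride):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({1, 2, 8, 8});
  // Extract 3x3 patches: result is (N, C * 3 * 3, L) with L = 8 * 8 here (stride 1, padding 1).
  at::Tensor cols = at::im2col(x, /*kernel_size=*/{3, 3}, /*dilation=*/{1, 1},
                               /*padding=*/{1, 1}, /*stride=*/{1, 1});
  // Fold back onto an 8x8 canvas; overlaps are summed, not averaged.
  at::Tensor folded = at::col2im(cols, /*output_size=*/{8, 8}, {3, 3}, {1, 1}, {1, 1}, {1, 1});
  return 0;
}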
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isneginf(const Tensor & self); // {"schema": "aten::isneginf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isneginf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _add_batch_dim(const Tensor & self, int64_t batch_dim, int64_t level); // {"schema": "aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _remove_batch_dim(const Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim); // {"schema": "aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_entr(const Tensor & self); // {"schema": "aten::special_entr(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_entr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_ndtri(const Tensor & self); // {"schema": "aten::special_ndtri(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_ndtri_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_log_ndtr(const Tensor & self); // {"schema": "aten::special_log_ndtr(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_log_ndtr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_expm1(const Tensor & self); // {"schema": "aten::special_expm1(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_expm1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_exp2(const Tensor & self); // {"schema": "aten::special_exp2(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_exp2_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_psi(const Tensor & self); // {"schema": "aten::special_psi(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_psi_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_digamma(const Tensor & self); // {"schema": "aten::special_digamma(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_digamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_gammaln(const Tensor & self); // {"schema": "aten::special_gammaln(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_gammaln_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_gammaln.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_erf(const Tensor & self); // {"schema": "aten::special_erf(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_erf_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_erfc(const Tensor & self); // {"schema": "aten::special_erfc(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_erfc_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_erfcx(const Tensor & self); // {"schema": "aten::special_erfcx(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_erfcx_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_erfinv(const Tensor & self); // {"schema": "aten::special_erfinv(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_erfinv_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_ndtr(const Tensor & self); // {"schema": "aten::special_ndtr(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_ndtr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_xlog1py(const Tensor & self, const Tensor & other); // {"schema": "aten::special_xlog1py(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_xlog1py(const Scalar & self, const Tensor & other); // {"schema": "aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_xlog1py(const Tensor & self, const Scalar & other); // {"schema": "aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_xlog1py_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_xlog1py_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_xlog1py_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_xlogy(const Tensor & self, const Tensor & other); // {"schema": "aten::special_xlogy(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_xlogy(const Scalar & self, const Tensor & other); // {"schema": "aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_xlogy(const Tensor & self, const Scalar & other); // {"schema": "aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_xlogy_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_xlogy_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_xlogy_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_zeta(const Tensor & self, const Tensor & other); // {"schema": "aten::special_zeta(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_zeta(const Scalar & self, const Tensor & other); // {"schema": "aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_zeta(const Tensor & self, const Scalar & other); // {"schema": "aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_zeta_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_zeta_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_zeta_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_i0(const Tensor & self); // {"schema": "aten::special_i0(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_i0e(const Tensor & self); // {"schema": "aten::special_i0e(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_i0e_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_i1(const Tensor & self); // {"schema": "aten::special_i1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_i1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i1.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_i1e(const Tensor & self); // {"schema": "aten::special_i1e(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_i1e_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_logit(const Tensor & self, c10::optional eps); // {"schema": "aten::special_logit(Tensor self, float? eps=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_logit_out(const Tensor & self, c10::optional eps, Tensor & out); // {"schema": "aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_polygamma(int64_t n, const Tensor & self); // {"schema": "aten::special_polygamma(int n, Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_polygamma_out(int64_t n, const Tensor & self, Tensor & out); // {"schema": "aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_logsumexp_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_expit(const Tensor & self); // {"schema": "aten::special_expit(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_expit_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_sinc(const Tensor & self); // {"schema": "aten::special_sinc(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_sinc_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_round(const Tensor & self, int64_t decimals); // {"schema": "aten::special_round(Tensor self, *, int decimals=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_round_out(const Tensor & self, int64_t decimals, Tensor & out); // {"schema": "aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_log1p(const Tensor & self); // {"schema": "aten::special_log1p(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_log1p_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_gammainc_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_gammainc(const Tensor & self, const Tensor & other); // {"schema": "aten::special_gammainc(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_gammaincc_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_gammaincc(const Tensor & self, const Tensor & other); // {"schema": "aten::special_gammaincc(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_multigammaln(const Tensor & self, int64_t p); // {"schema": "aten::special_multigammaln(Tensor self, int p) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_multigammaln_out(const Tensor & self, int64_t p, Tensor & out); // {"schema": "aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fft_fft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? 
norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_hfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ihfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_hfft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfft2(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_ihfft2_out(const Tensor & self, OptionalSymIntArrayRef s, IntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_hfftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfftn(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_ihfftn_out(const Tensor & self, OptionalSymIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fftfreq(int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fft_fftfreq_out(int64_t n, double d, Tensor & out); // {"schema": "aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fft_rfftfreq(int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fft_rfftfreq_out(int64_t n, double d, Tensor & out); // {"schema": "aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fft_fftshift(const Tensor & self, OptionalIntArrayRef dim); // {"schema": "aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fft_ifftshift(const Tensor & self, OptionalIntArrayRef dim); // {"schema": "aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_cholesky_ex(const Tensor & self, bool upper, bool check_errors); // {"schema": "aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_cholesky_ex_out(const Tensor & self, bool upper, bool check_errors, Tensor & L, Tensor & info); // {"schema": "aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)", "dispatch": "True", "default": "False"} +Tensor linalg_cholesky(const Tensor & self, bool upper); // {"schema": "aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cholesky_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cross(const Tensor & self, const Tensor & other, int64_t dim); // {"schema": "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_cross_out(const Tensor & self, const Tensor & other, int64_t dim, Tensor & out); // {"schema": "aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lu_factor(const Tensor & A, bool pivot); // {"schema": "aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_lu_factor_out(const Tensor & A, bool pivot, Tensor & LU, Tensor & pivots); // {"schema": "aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_lu_factor_ex(const Tensor & A, bool pivot, bool check_errors); // {"schema": "aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lu_factor_ex_out(const Tensor & A, bool pivot, bool check_errors, Tensor & LU, Tensor & pivots, Tensor & info); // {"schema": "aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lu(const Tensor & A, bool pivot); // {"schema": "aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lu_out(const Tensor & A, bool pivot, Tensor & P, Tensor & L, Tensor & U); // {"schema": "aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)", "dispatch": "True", "default": "False"} +Tensor linalg_lu_solve(const Tensor & LU, const Tensor & pivots, const Tensor & B, bool left, bool adjoint); // {"schema": "aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_lu_solve_out(const Tensor & LU, const Tensor & pivots, const Tensor & B, bool left, bool adjoint, Tensor & out); // {"schema": "aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple _linalg_det(const Tensor & A); // {"schema": "aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_det_out(const Tensor & A, Tensor & result, Tensor & LU, Tensor & pivots); // {"schema": "aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)", "dispatch": "True", "default": "False"} +Tensor linalg_det(const Tensor & A); // {"schema": "aten::linalg_det(Tensor A) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_det_out(const Tensor & A, Tensor & out); // {"schema": "aten::linalg_det.out(Tensor A, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor det(const Tensor & self); // {"schema": "aten::det(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_ldl_factor_ex(const Tensor & self, bool hermitian, bool check_errors); // {"schema": "aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_ldl_factor_ex_out(const Tensor & self, bool hermitian, bool check_errors, Tensor & LD, Tensor & pivots, Tensor & info); // {"schema": "aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_ldl_factor(const Tensor & self, bool hermitian); // {"schema": "aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_ldl_factor_out(const Tensor & self, bool hermitian, Tensor & LD, Tensor & pivots); // {"schema": "aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)", "dispatch": "False", "default": "True"} +Tensor linalg_ldl_solve(const Tensor & LD, const Tensor & pivots, const Tensor & B, bool hermitian); // {"schema": "aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_ldl_solve_out(const Tensor & LD, const Tensor & pivots, const Tensor & B, bool hermitian, Tensor & out); // {"schema": "aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lstsq(const Tensor & self, const Tensor & b, c10::optional rcond, c10::optional driver); // {"schema": "aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lstsq_out(const Tensor & self, const Tensor & b, c10::optional rcond, c10::optional driver, Tensor & solution, Tensor & residuals, Tensor & rank, Tensor & singular_values); // {"schema": "aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)", "dispatch": "True", "default": "False"} +Tensor linalg_matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::linalg_matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_vecdot(const Tensor & x, const Tensor & y, int64_t dim); // {"schema": "aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_vecdot_out(const Tensor & x, const Tensor & y, int64_t dim, Tensor & out); // {"schema": "aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_exp(const Tensor & self); // {"schema": "aten::linalg_matrix_exp(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _linalg_slogdet(const Tensor & A); // {"schema": "aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_slogdet_out(const Tensor & A, Tensor & sign, Tensor & logabsdet, Tensor & LU, Tensor & pivots); // {"schema": "aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)", "dispatch": "True", "default": "False"} +::std::tuple linalg_slogdet(const Tensor & A); // {"schema": "aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple linalg_slogdet_out(const Tensor & A, Tensor & sign, Tensor & logabsdet); // {"schema": "aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple slogdet(const Tensor & self); // {"schema": "aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple slogdet_out(const Tensor & self, Tensor & sign, Tensor & logabsdet); // {"schema": "aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)", "dispatch": "False", "default": "True"} +Tensor logdet(const Tensor & self); // {"schema": "aten::logdet(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_eig(const Tensor & self); // {"schema": "aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "True", "default": "False"} +::std::tuple linalg_eig_out(const Tensor & self, Tensor & eigenvalues, Tensor & eigenvectors); // {"schema": "aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", "dispatch": "True", "default": "False"} +Tensor _linalg_eigvals(const Tensor & self); // {"schema": "aten::_linalg_eigvals(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor linalg_eigvals(const Tensor & self); // {"schema": "aten::linalg_eigvals(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_eigvals_out(const Tensor & self, Tensor & out); // {"schema": "aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple _linalg_eigh(const Tensor & A, c10::string_view UPLO, bool compute_v); // {"schema": "aten::_linalg_eigh(Tensor A, str UPLO=\"L\", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_eigh_out(const Tensor & A, c10::string_view UPLO, bool compute_v, Tensor & eigenvalues, Tensor & eigenvectors); // {"schema": "aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO=\"L\", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) 
eigenvectors)", "dispatch": "True", "default": "False"} +::std::tuple linalg_eigh(const Tensor & self, c10::string_view UPLO); // {"schema": "aten::linalg_eigh(Tensor self, str UPLO=\"L\") -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "False", "default": "True"} +::std::tuple linalg_eigh_out(const Tensor & self, c10::string_view UPLO, Tensor & eigvals, Tensor & eigvecs); // {"schema": "aten::linalg_eigh.eigvals(Tensor self, str UPLO=\"L\", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", "dispatch": "False", "default": "True"} +Tensor linalg_eigvalsh(const Tensor & self, c10::string_view UPLO); // {"schema": "aten::linalg_eigvalsh(Tensor self, str UPLO=\"L\") -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_eigvalsh_out(const Tensor & self, c10::string_view UPLO, Tensor & out); // {"schema": "aten::linalg_eigvalsh.out(Tensor self, str UPLO=\"L\", *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_householder_product(const Tensor & input, const Tensor & tau); // {"schema": "aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & linalg_householder_product_out(const Tensor & input, const Tensor & tau, Tensor & out); // {"schema": "aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_inv_ex(const Tensor & A, bool check_errors); // {"schema": "aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_inv_ex_out(const Tensor & A, bool check_errors, Tensor & inverse, Tensor & info); // {"schema": "aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)", "dispatch": "True", "default": "False"} +Tensor linalg_inv(const Tensor & A); // {"schema": "aten::linalg_inv(Tensor A) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_inv_out(const Tensor & A, Tensor & out); // {"schema": "aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor inverse(const Tensor & self); // {"schema": "aten::inverse(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & inverse_out(const Tensor & self, Tensor & out); // {"schema": "aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor inner(const Tensor & self, const Tensor & other); // {"schema": "aten::inner(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & inner_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor outer(const Tensor & self, const Tensor & vec2); // {"schema": "aten::outer(Tensor self, Tensor vec2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & outer_out(const Tensor & self, const Tensor & vec2, Tensor & out); // {"schema": "aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor ger(const Tensor & self, const Tensor & vec2); // {"schema": "aten::ger(Tensor self, Tensor vec2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & ger_out(const Tensor & self, const Tensor & vec2, Tensor & out); // {"schema": "aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_norm(const Tensor & self, const c10::optional & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor linalg_norm(const Tensor & self, c10::string_view ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_norm_out(const Tensor & self, const c10::optional & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & linalg_norm_out(const Tensor & self, c10::string_view ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_vector_norm(const Tensor & self, const Scalar & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_vector_norm_out(const Tensor & self, const Scalar & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linalg_matrix_norm(const Tensor & self, const Scalar & ord, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_norm_out(const Tensor & self, const Scalar & ord, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_norm(const Tensor & self, c10::string_view ord, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_norm_out(const Tensor & self, c10::string_view ord, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple _linalg_svd(const Tensor & A, bool full_matrices, bool compute_uv, c10::optional driver); // {"schema": "aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_svd_out(const Tensor & A, bool full_matrices, bool compute_uv, c10::optional driver, Tensor & U, Tensor & S, Tensor & Vh); // {"schema": "aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", "dispatch": "True", "default": "False"} +::std::tuple linalg_svd(const Tensor & A, bool full_matrices, c10::optional driver); // {"schema": "aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)", "dispatch": "False", "default": "True"} +::std::tuple linalg_svd_out(const Tensor & A, bool full_matrices, c10::optional driver, Tensor & U, Tensor & S, Tensor & Vh); // {"schema": "aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", "dispatch": "False", "default": "True"} +Tensor linalg_svdvals(const Tensor & A, c10::optional driver); // {"schema": "aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_svdvals_out(const Tensor & A, c10::optional driver, Tensor & out); // {"schema": "aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cond(const Tensor & self, const c10::optional & p); // {"schema": "aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cond_out(const Tensor & self, const c10::optional & p, Tensor & out); // {"schema": "aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cond(const Tensor & self, c10::string_view p); // {"schema": "aten::linalg_cond.p_str(Tensor self, str p) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cond_out(const Tensor & self, c10::string_view p, Tensor & out); // {"schema": "aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, const c10::optional & atol, const c10::optional & rtol, bool hermitian); // {"schema": "aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, const c10::optional & atol, const c10::optional & rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor linalg_pinv(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian); // {"schema": "aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, double rcond, bool hermitian); // {"schema": "aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, const Tensor & rcond, bool hermitian); // {"schema": "aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, double rcond, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, const Tensor & rcond, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple _linalg_solve_ex(const Tensor & A, const Tensor & B, bool left, bool check_errors); // {"schema": "aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_solve_ex_out(const Tensor & A, const Tensor & B, bool left, bool check_errors, Tensor & result, Tensor & LU, Tensor & pivots, Tensor & info); // {"schema": "aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_solve_ex(const Tensor & A, const Tensor & B, bool left, bool check_errors); // {"schema": "aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)", "dispatch": "False", "default": "True"} +::std::tuple linalg_solve_ex_out(const Tensor & A, const Tensor & B, bool left, bool check_errors, Tensor & result, Tensor & info); // {"schema": "aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)", "dispatch": "False", "default": "True"} +Tensor linalg_solve(const Tensor & A, const Tensor & B, bool left); // {"schema": "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_solve_out(const Tensor & A, const Tensor & B, bool left, Tensor & out); // {"schema": "aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_tensorinv(const Tensor & self, int64_t ind); // {"schema": "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_tensorinv_out(const Tensor & self, int64_t ind, Tensor & out); // {"schema": "aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_tensorsolve(const Tensor & self, const Tensor & other, OptionalIntArrayRef dims); // {"schema": "aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_tensorsolve_out(const Tensor & self, const Tensor & other, OptionalIntArrayRef dims, Tensor & out); // {"schema": "aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple linalg_qr(const Tensor & A, c10::string_view mode); // {"schema": "aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)", "dispatch": "True", "default": "True"} +::std::tuple linalg_qr_out(const Tensor & A, c10::string_view mode, Tensor & Q, Tensor & R); // {"schema": "aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)", "dispatch": "True", "default": "False"} +Tensor linalg_matrix_power(const Tensor & self, int64_t n); // {"schema": "aten::linalg_matrix_power(Tensor self, int n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_power_out(const Tensor & self, int64_t n, Tensor & out); // {"schema": "aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & input, const c10::optional & atol, const c10::optional & rtol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & input, const c10::optional & atol, const c10::optional & rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & self, double tol, bool hermitian); // {"schema": "aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & self, double tol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & input, const Tensor & tol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & input, const Tensor & tol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_multi_dot(TensorList tensors); // {"schema": "aten::linalg_multi_dot(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_multi_dot_out(TensorList tensors, Tensor & out); // {"schema": "aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nested_to_padded_tensor(const Tensor & self, double padding, OptionalIntArrayRef output_size); // {"schema": "aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_serialization_subcmul(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_parallel_materialize(const Tensor & self, int64_t num_parallel, bool skip_first); // {"schema": "aten::_test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_optional_intlist(const Tensor & values, OptionalIntArrayRef addends); // {"schema": "aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_optional_filled_intlist(const Tensor & values, OptionalIntArrayRef addends); // {"schema": "aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_optional_floatlist(const Tensor & values, c10::optional> addends); // {"schema": "aten::_test_optional_floatlist(Tensor values, float[]? 
addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_string_default(const Tensor & dummy, c10::string_view a, c10::string_view b); // {"schema": "aten::_test_string_default(Tensor dummy, str a=\"\\\"'\\\\\", str b='\"\\'\\\\') -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_ambiguous_defaults(const Tensor & dummy, int64_t a, int64_t b); // {"schema": "aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_ambiguous_defaults(const Tensor & dummy, int64_t a, c10::string_view b); // {"schema": "aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b=\"2\") -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_warn_in_autograd(const Tensor & self); // {"schema": "aten::_test_warn_in_autograd(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch(const Tensor & self, bool b); // {"schema": "aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch_view(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch_view_copy(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor segment_reduce(const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial); // {"schema": "aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _segment_reduce_backward(const Tensor & grad, const Tensor & output, const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial); // {"schema": "aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pad_sequence(TensorList sequences, bool batch_first, double padding_value); // {"schema": "aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor flatten_dense_tensors(TensorList tensors); // {"schema": "aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector unflatten_dense_tensors(const Tensor & flat, TensorList tensors); // {"schema": "aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor _nested_tensor_from_tensor_list(TensorList list, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _fw_primal_copy(const Tensor & self, int64_t level); // {"schema": "aten::_fw_primal_copy(Tensor self, int level) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _make_dual_copy(const Tensor & primal, const Tensor & tangent, int64_t level); // {"schema": "aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_as_real_copy(const Tensor & self); // {"schema": "aten::view_as_real_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_as_complex_copy(const Tensor & self); // {"schema": "aten::view_as_complex_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _conj_copy(const Tensor & self); // {"schema": "aten::_conj_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _neg_view_copy(const Tensor & self); // {"schema": "aten::_neg_view_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor as_strided_copy(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _sparse_broadcast_to_copy(const Tensor & self, IntArrayRef size); // {"schema": "aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diagonal_copy(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor expand_copy(const Tensor & self, c10::SymIntArrayRef size, bool implicit); // {"schema": "aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor permute_copy(const Tensor & self, IntArrayRef dims); // {"schema": "aten::permute_copy(Tensor self, int[] dims) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _reshape_alias_copy(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor", "dispatch": "True", "default": "True"} +Tensor select_copy(const Tensor & self, int64_t dim, c10::SymInt index); // {"schema": "aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor", "dispatch": "True", "default": "True"} +Tensor detach_copy(const Tensor & self); // {"schema": "aten::detach_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor slice_copy(const Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); // {"schema": "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor", "dispatch": "True", "default": "True"} +::std::vector split_copy(const Tensor & self, c10::SymInt split_size, int64_t dim); // {"schema": "aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector split_with_sizes_copy(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim); // {"schema": "aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +Tensor squeeze_copy(const Tensor & self); // {"schema": "aten::squeeze_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor squeeze_copy(const Tensor & self, int64_t dim); // {"schema": "aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor squeeze_copy(const Tensor & self, IntArrayRef dim); // {"schema": "aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor t_copy(const Tensor & self); // {"schema": "aten::t_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor transpose_copy(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor unsqueeze_copy(const Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _indices_copy(const Tensor & self); // {"schema": "aten::_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _values_copy(const Tensor & self); // {"schema": "aten::_values_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor indices_copy(const Tensor & self); // {"schema": "aten::indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor values_copy(const Tensor & self); // {"schema": "aten::values_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor crow_indices_copy(const Tensor & self); // {"schema": "aten::crow_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor col_indices_copy(const Tensor & self); // {"schema": "aten::col_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ccol_indices_copy(const Tensor & self); // {"schema": "aten::ccol_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor row_indices_copy(const Tensor & self); // {"schema": "aten::row_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +::std::vector unbind_copy(const Tensor & self, int64_t dim); // {"schema": "aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +void unbind_copy_out(const Tensor & self, int64_t dim, TensorList out); // {"schema": "aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void split_copy_out(const Tensor & self, c10::SymInt split_size, int64_t dim, TensorList out); // {"schema": "aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void split_with_sizes_copy_out(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, TensorList out); // {"schema": "aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", 
"dispatch": "True", "default": "True"} +Tensor view_copy(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::view_copy(Tensor self, SymInt[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_copy(const Tensor & self, ScalarType dtype); // {"schema": "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor unfold_copy(const Tensor & self, int64_t dimension, int64_t size, int64_t step); // {"schema": "aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor", "dispatch": "True", "default": "True"} +Tensor alias_copy(const Tensor & self); // {"schema": "aten::alias_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor to_padded_tensor(const Tensor & self, double padding, OptionalSymIntArrayRef output_size); // {"schema": "aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_softmax_with_shape(const Tensor & self, const Tensor & query); // {"schema": "aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _transformer_encoder_layer_fwd(const Tensor & src, int64_t embed_dim, int64_t num_heads, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const Tensor & norm_weight_1, const Tensor & norm_bias_1, const Tensor & norm_weight_2, const Tensor & norm_bias_2, const Tensor & ffn_weight_1, const Tensor & ffn_bias_1, const Tensor & ffn_weight_2, const Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type); // {"schema": "aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _native_multi_head_attention(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask, bool need_weights, bool average_attn_weights, c10::optional mask_type); // {"schema": "aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor scaled_dot_product_attention(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool is_causal, c10::optional scale); // {"schema": "aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? 
scale=None) -> Tensor", "dispatch": "False", "default": "True"} +int64_t _fused_sdp_choice(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool is_causal, c10::optional scale); // {"schema": "aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> int", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_attention_math(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool is_causal, const c10::optional & dropout_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _scaled_dot_product_flash_attention(const Tensor & query, const Tensor & key, const Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_flash_attention_for_cpu(const Tensor & query, const Tensor & key, const Tensor & value, double dropout_p, bool is_causal, const c10::optional & attn_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_flash_attention_backward(const Tensor & grad_out, const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & out, const Tensor & logsumexp, const Tensor & cum_seq_q, const Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const Tensor & philox_seed, const Tensor & philox_offset, c10::optional scale); // {"schema": "aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_flash_attention_for_cpu_backward(const Tensor & grad_out, const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & out, const Tensor & logsumexp, double dropout_p, bool is_causal, const c10::optional & attn_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? 
scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_efficient_attention(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, c10::optional scale); // {"schema": "aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_efficient_attention_backward(const Tensor & grad_out_, const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & attn_bias, const Tensor & out, const Tensor & logsumexp, const Tensor & philox_seed, const Tensor & philox_offset, double dropout_p, ::std::array grad_input_mask, bool is_causal, c10::optional scale); // {"schema": "aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_cudnn_attention(const Tensor & query, const Tensor & key, const Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale); // {"schema": "aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset)", "dispatch": "True", "default": "False"} +::std::tuple _flash_attention_forward(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & cum_seq_q, const c10::optional & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional scale); // {"schema": "aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)", "dispatch": "True", "default": "False"} +::std::tuple _flash_attention_backward(const Tensor & grad_out, const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & out, const Tensor & logsumexp, const Tensor & cum_seq_q, const Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const Tensor & philox_seed, const Tensor & philox_offset, c10::optional scale); // {"schema": "aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? 
scale=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _efficient_attention_forward(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & bias, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, c10::optional max_seqlen_q, c10::optional max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp, c10::optional scale, const c10::optional & causal_diagonal, const c10::optional & seqlen_k); // {"schema": "aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, int? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? causal_diagonal=None, Tensor? seqlen_k=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k)", "dispatch": "True", "default": "False"} +::std::tuple _efficient_attention_backward(const Tensor & grad_out_, const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & bias, const Tensor & out, const c10::optional & cu_seqlens_q, const c10::optional & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const Tensor & logsumexp, double dropout_p, const Tensor & philox_seed, const Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional scale, c10::optional num_splits_key); // {"schema": "aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _triton_scaled_dot_attention(const Tensor & q, const Tensor & k, const Tensor & v, double dropout_p); // {"schema": "aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fill_mem_eff_dropout_mask_(Tensor & self, double dropout_p, int64_t seed, int64_t offset); // {"schema": "aten::_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _triton_multi_head_attention(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask); // {"schema": "aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor special_airy_ai(const Tensor & x); // {"schema": "aten::special_airy_ai(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_airy_ai_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_airy_ai.out(Tensor x, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_j0(const Tensor & self); // {"schema": "aten::special_bessel_j0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_j0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_j1(const Tensor & self); // {"schema": "aten::special_bessel_j1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_j1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_y0(const Tensor & self); // {"schema": "aten::special_bessel_y0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_y0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_y1(const Tensor & self); // {"schema": "aten::special_bessel_y1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_y1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_chebyshev_polynomial_t(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_t(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_t(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_t_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_t_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_t_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_u_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_u_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_u_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_v_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_v_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_v_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_w_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_w_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_chebyshev_polynomial_w_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_h(const Tensor & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_h(const Scalar & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_h(const Tensor & x, const Scalar & n); // {"schema": "aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_hermite_polynomial_h_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_hermite_polynomial_h_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_hermite_polynomial_h_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_he(const Tensor & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_he(const Scalar & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_he(const Tensor & x, const Scalar & n); // {"schema": "aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_hermite_polynomial_he_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_hermite_polynomial_he_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_hermite_polynomial_he_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_laguerre_polynomial_l(const Tensor & x, const Tensor & n); // {"schema": "aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_laguerre_polynomial_l(const Scalar & x, const Tensor & n); // {"schema": "aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_laguerre_polynomial_l(const Tensor & x, const Scalar & n); // {"schema": "aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_laguerre_polynomial_l_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_laguerre_polynomial_l_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_laguerre_polynomial_l_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_legendre_polynomial_p(const Tensor & x, const Tensor & n); // {"schema": "aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_legendre_polynomial_p(const Scalar & x, const Tensor & n); // {"schema": "aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_legendre_polynomial_p(const Tensor & x, const Scalar & n); // {"schema": "aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_legendre_polynomial_p_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_legendre_polynomial_p_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_legendre_polynomial_p_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_modified_bessel_i0(const Tensor & self); // {"schema": "aten::special_modified_bessel_i0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_i1(const Tensor & self); // {"schema": "aten::special_modified_bessel_i1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_i1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_k0(const Tensor & self); // {"schema": "aten::special_modified_bessel_k0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_k0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_k1(const Tensor & self); // {"schema": "aten::special_modified_bessel_k1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_k1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_scaled_modified_bessel_k0(const Tensor & x); // {"schema": "aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_scaled_modified_bessel_k0_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_scaled_modified_bessel_k1(const Tensor & x); // {"schema": "aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_scaled_modified_bessel_k1_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_shifted_chebyshev_polynomial_t(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_t(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_t(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_spherical_bessel_j0(const Tensor & x); // {"schema": "aten::special_spherical_bessel_j0(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_spherical_bessel_j0_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _foobar(const Tensor & self, bool arg1, bool arg2, bool arg3); // {"schema": "aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor", "dispatch": "True", "default": "False"} +void _fused_adam_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_adam_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_adamw_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_adamw_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_sgd_(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _fused_sgd_(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, const Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +void _propagate_xla_data(const Tensor & input, const Tensor & output); // {"schema": "aten::_propagate_xla_data(Tensor input, Tensor output) -> ()", "dispatch": "False", "default": "True"} +Tensor & _new_zeros_with_same_feature_meta_out(const Tensor & self, const Tensor & other, int64_t self_num_batch_dims, Tensor & out); // {"schema": "aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _cudnn_ctc_loss_out(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, Tensor & out0, Tensor & out1); // {"schema": "aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _cudnn_rnn_flatten_weight_out(TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, Tensor & out); // {"schema": "aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _cudnn_rnn_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? 
cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"} +void _cudnn_rnn_backward_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2, TensorList out3); // {"schema": "aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()", "dispatch": "True", "default": "True"} +Tensor & _cudnn_init_dropout_state_out(double dropout, bool train, int64_t dropout_seed, Tensor & out); // {"schema": "aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _fused_dropout_out(const Tensor & self, double p, c10::optional generator, Tensor & out0, Tensor & out1); // {"schema": "aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _masked_scale_out(const Tensor & self, const Tensor & mask, double scale, Tensor & out); // {"schema": "aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple native_dropout_out(const Tensor & input, double p, c10::optional train, Tensor & out0, Tensor & out1); // {"schema": "aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & native_dropout_backward_out(const Tensor & grad_output, const Tensor & mask, double scale, Tensor & out); // {"schema": "aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _conj_physical_out(const Tensor & self, Tensor & out); // {"schema": "aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _add_relu_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & add_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & affine_grid_generator_out(const Tensor & theta, c10::SymIntArrayRef size, bool align_corners, Tensor & out); // {"schema": "aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_functorch_fallback_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bartlett_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bartlett_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_batch_norm_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & var, double eps, double output_scale, int64_t output_zero_point, Tensor & out); // {"schema": "aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bernoulli_out(const Tensor & self, const Tensor & p, c10::optional generator, Tensor & out); // {"schema": "aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bernoulli(const Tensor & self, const Tensor & p, c10::optional generator); // {"schema": "aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bernoulli_out(const Tensor & self, double p, c10::optional generator, Tensor & out); // {"schema": "aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & binary_cross_entropy_with_logits_out(const Tensor & self, const Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction, Tensor & out); // {"schema": "aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bincount_out(const Tensor & self, const c10::optional & weights, int64_t minlength, Tensor & out); // {"schema": "aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & blackman_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::blackman_window.out(int window_length, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & blackman_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & block_diag_out(TensorList tensors, Tensor & out); // {"schema": "aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & constant_pad_nd_out(const Tensor & self, c10::SymIntArrayRef pad, const Scalar & value, Tensor & out); // {"schema": "aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & convolution_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, Tensor & out); // {"schema": "aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & convolution_overrideable_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, Tensor & out); // {"schema": "aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward_overrideable_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _convolution_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, Tensor & out); // {"schema": "aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & conv_tbc_out(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad, Tensor & out); // {"schema": "aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & copy_out(const Tensor & self, const Tensor & src, bool non_blocking, Tensor & out); // {"schema": "aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _copy_from_out(const Tensor & self, const Tensor & dst, bool non_blocking, Tensor & out); // {"schema": "aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _copy_from_and_resize_out(const Tensor & self, const Tensor & dst, Tensor & out); // {"schema": "aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & count_nonzero_out(const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & count_nonzero_out(const Tensor & self, c10::optional dim, Tensor & out); // {"schema": "aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_affine_grid_generator_out(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, Tensor & out); // {"schema": "aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_affine_grid_generator_backward_out(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, Tensor & out); // {"schema": "aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple cudnn_batch_norm_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +::std::tuple cudnn_batch_norm_backward_out(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const Tensor & reserveSpace, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_transpose_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, Tensor & out); // {"schema": "aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mps_convolution_transpose_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mps_convolution_transpose_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_relu_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_add_relu_out(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional & alpha, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_grid_sampler_out(const Tensor & self, const Tensor & grid, Tensor & out); // {"schema": "aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple cudnn_grid_sampler_backward_out(const Tensor & self, const Tensor & grid, const Tensor & grad_output, Tensor & out0, Tensor & out1); // {"schema": "aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _ctc_loss_out(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity, Tensor & out0, Tensor & out1); // {"schema": "aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _ctc_loss_out(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, bool zero_infinity, Tensor & out0, Tensor & out1); // {"schema": "aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _ctc_loss_backward_out(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity, Tensor & out); // {"schema": "aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diag_embed_out(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Scalar & other, c10::optional rounding_mode, Tensor & out); // {"schema": "aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_out(const Tensor & weight, const Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, Tensor & out); // {"schema": "aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_dense_backward_out(const Tensor & grad_output, const Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, Tensor & out); // {"schema": "aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_renorm_out(const Tensor & self, const Tensor & indices, double max_norm, double norm_type, Tensor & out); // {"schema": "aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor embedding_renorm(const Tensor & self, const Tensor & indices, double max_norm, double norm_type); // {"schema": "aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple _embedding_bag_forward_only_out(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +::std::tuple _embedding_bag_out(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +Tensor & _embedding_bag_dense_backward_out(const Tensor & grad, const Tensor & indices, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx, Tensor & out); // {"schema": "aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _embedding_bag_per_sample_weights_backward_out(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode, int64_t padding_idx, Tensor & out); // {"schema": "aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_out(IntArrayRef size, c10::optional names, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_permuted_out(c10::SymIntArrayRef size, IntArrayRef physical_layout, Tensor & out); // {"schema": "aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_empty_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_empty_strided_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_full_out(const Tensor & self, c10::SymIntArrayRef size, const Scalar & fill_value, Tensor & out); // {"schema": "aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_zeros_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_ones_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _empty_affine_quantized_out(c10::SymIntArrayRef size, double scale, int64_t zero_point, c10::optional memory_format, Tensor & out); // {"schema": "aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _empty_per_channel_affine_quantized_out(c10::SymIntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, c10::optional memory_format, Tensor & out); // {"schema": "aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & resize_out(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format, const Tensor & out); // {"schema": "aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resize(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format); // {"schema": "aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +const Tensor & _resize_output_out(const Tensor & self, c10::SymIntArrayRef size, Device device, const Tensor & out); // {"schema": "aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _resize_output(const Tensor & self, c10::SymIntArrayRef size, Device device); // {"schema": "aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & empty_quantized_out(IntArrayRef size, const Tensor & qtensor, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_strided_out(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fill_out(const Tensor & self, const Scalar & value, Tensor & out); // {"schema": "aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fill_out(const Tensor & self, const Tensor & value, Tensor & out); // {"schema": "aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & floor_divide_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & full_out(IntArrayRef size, const Scalar & fill_value, c10::optional names, Tensor & out); // {"schema": "aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & full_like_out(const Tensor & self, const Scalar & fill_value, c10::optional memory_format, Tensor & out); // {"schema": "aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & from_file_out(c10::string_view filename, c10::optional shared, c10::optional size, Tensor & out); // {"schema": "aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & grid_sampler_2d_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple grid_sampler_2d_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _grid_sampler_2d_cpu_fallback_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & grid_sampler_3d_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple grid_sampler_3d_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & hann_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hann_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, double alpha, Tensor & out); // {"schema": "aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, double alpha, double beta, Tensor & out); // {"schema": "aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, bool periodic, double beta, Tensor & out); // {"schema": "aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_backward_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & index_put_out(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, Tensor & out); // {"schema": "aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _index_put_impl_out(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, bool unsafe, Tensor & out); // {"schema": "aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _index_put_impl(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, bool unsafe); // {"schema": "aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isnan_out(const Tensor & self, Tensor & out); // {"schema": "aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_out(const Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_backward_out(const Tensor & grad_out, const Tensor & input, c10::SymIntArrayRef normalized_shape, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple linear_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & mkldnn_linear_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, Tensor & out); // {"schema": "aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_linear_backward_input_out(IntArrayRef input_size, const Tensor & grad_output, const Tensor & weight, Tensor & out); // {"schema": "aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_linear_backward_weights_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, bool bias_defined, Tensor & out0, Tensor & out1); // {"schema": "aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_linear_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple matmul_backward_out(const Tensor & grad, const Tensor & self, const Tensor & other, ::std::array mask, Tensor & out0, Tensor & out1); // {"schema": "aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _aminmax_out(const Tensor & self, Tensor & out0, Tensor & out1); // {"schema": "aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _aminmax_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_max_pool1d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_max_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & median_out(const Tensor & self, Tensor & out); // {"schema": "aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nanmedian_out(const Tensor & self, Tensor & out); // {"schema": "aten::nanmedian.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mps_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mps_convolution_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & mkldnn_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_rnn_layer_out(const Tensor & input, const Tensor & weight0, const Tensor & weight1, const Tensor & weight2, const Tensor & weight3, const Tensor & hx_, const Tensor & cx_, bool reverse, IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_rnn_layer_backward_out(const Tensor & input, const Tensor & weight1, const Tensor & weight2, const Tensor & weight3, const Tensor & weight4, const Tensor & hx_, const Tensor & cx_tmp, const Tensor & output, const Tensor & hy_, const Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, IntArrayRef batch_sizes, bool batch_first, const Tensor & workspace, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4, Tensor & out5, Tensor & out6); // {"schema": "aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) 
out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))", "dispatch": "True", "default": "True"} +::std::tuple miopen_batch_norm_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple miopen_batch_norm_backward_out(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & miopen_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & miopen_convolution_transpose_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & miopen_depthwise_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple miopen_rnn_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"} +void miopen_rnn_backward_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2, TensorList out3); // {"schema": "aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()", "dispatch": "True", "default": "True"} +Tensor & _sparse_sparse_matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mul_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _native_batch_norm_legit_functional(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps); // {"schema": "aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)", "dispatch": "True", "default": "True"} +::std::tuple _native_batch_norm_legit_no_training_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_stats_out(const Tensor & input, double eps, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_gather_stats_out(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_gather_stats_with_counts_out(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const Tensor & counts, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple native_batch_norm_backward_out(const Tensor & grad_out, const Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_backward_reduce_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +Tensor & batch_norm_backward_elemt_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, const Tensor & sum_dy, const Tensor & sum_dy_xmu, const Tensor & count, Tensor & out); // {"schema": "aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_update_stats_out(const Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? 
running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _nnpack_spatial_convolution_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ones_out(IntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ones_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _euclidean_dist_out(const Tensor & x1, const Tensor & x2, Tensor & out); // {"schema": "aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _cdist_forward_out(const Tensor & x1, const Tensor & x2, double p, c10::optional compute_mode, Tensor & out); // {"schema": "aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _cdist_backward_out(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist, Tensor & out); // {"schema": "aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pdist_forward_out(const Tensor & self, double p, Tensor & out); // {"schema": "aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pdist_backward_out(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist, Tensor & out); // {"schema": "aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & pixel_shuffle_out(const Tensor & self, int64_t upscale_factor, Tensor & out); // {"schema": "aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & pixel_unshuffle_out(const Tensor & self, int64_t downscale_factor, Tensor & out); // {"schema": "aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & channel_shuffle_out(const Tensor & self, c10::SymInt groups, Tensor & out); // {"schema": "aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pin_memory_out(const Tensor & self, c10::optional device, Tensor & out); // {"schema": "aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scalar_tensor_out(const Scalar & s, Tensor & out); // {"schema": "aten::scalar_tensor.out(Scalar s, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(c10::SymIntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, Tensor & out); // {"schema": "aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_like_out(const Tensor & self, c10::SymInt high, c10::optional memory_format, Tensor & out); // {"schema": "aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_like_out(const Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional memory_format, Tensor & out); // {"schema": "aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_out(c10::SymIntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_out(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, Tensor & out); // {"schema": "aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & repeat_out(const Tensor & self, c10::SymIntArrayRef repeats, Tensor & out); // {"schema": "aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & repeat_interleave_out(const Tensor & repeats, c10::optional output_size, Tensor & out); // {"schema": "aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mkldnn_reshape_out(const Tensor & self, IntArrayRef shape, Tensor & out); // {"schema": "aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & relu_out(const Tensor & self, Tensor & out); // {"schema": "aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & select_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, Tensor & out); // {"schema": "aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & celu_out(const Tensor & self, const Scalar & alpha, Tensor & out); // {"schema": "aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_scatter_out(const Tensor & self, const Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & select_scatter_out(const Tensor & self, const Tensor & src, int64_t dim, c10::SymInt index, Tensor & out); // {"schema": "aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_scatter_out(const Tensor & self, const Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & as_strided_scatter_out(const Tensor & self, const Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, Tensor & out); // {"schema": "aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void unsafe_split_out(const Tensor & self, c10::SymInt split_size, int64_t dim, TensorList out); // {"schema": "aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void unsafe_split_with_sizes_out(const Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, TensorList out); // {"schema": "aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & sum_out(const Tensor & self, c10::optional dtype, Tensor & out); // {"schema": "aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple std_mean_out(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & prod_out(const Tensor & self, c10::optional dtype, Tensor & out); // {"schema": "aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mkldnn_transpose_out(const Tensor & self, int64_t dim0, int64_t dim1, Tensor & out); // {"schema": "aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & flip_out(const Tensor & self, IntArrayRef dims, Tensor & out); // {"schema": "aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & roll_out(const Tensor & self, c10::SymIntArrayRef shifts, IntArrayRef dims, Tensor & out); // {"schema": "aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rot90_out(const Tensor & self, int64_t k, IntArrayRef dims, Tensor & out); // {"schema": "aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _transform_bias_rescale_qkv_out(const Tensor & qkv, const Tensor & qkv_bias, int64_t num_heads, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_from_mask_out(const Tensor & t, const Tensor & mask, bool mask_check, Tensor & out); // {"schema": "aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_from_padded_out(const Tensor & padded, const Tensor & cpu_nested_shape_example, bool fuse_transform_0213, Tensor & out); // {"schema": "aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_size_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_strides_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_storage_offsets_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_from_padded_and_nested_example_out(const Tensor & padded, const Tensor & nt_example, Tensor & out); // {"schema": "aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_view_from_buffer_copy_out(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, const Tensor & offsets, Tensor & out); // {"schema": "aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_view_from_jagged_copy_out(const Tensor & self, const Tensor & offsets, const Tensor & dummy, const c10::optional & lengths, int64_t ragged_idx, Tensor & out); // {"schema": "aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_get_values_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _trilinear_out(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim, Tensor & out); // {"schema": "aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _unique_out(const Tensor & self, bool sorted, bool return_inverse, Tensor & out0, Tensor & out1); // {"schema": "aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple unique_dim_out(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple unique_consecutive_out(const Tensor & self, bool return_inverse, bool return_counts, c10::optional dim, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple unique_dim_consecutive_out(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple _unique2_out(const Tensor & self, bool sorted, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _unsafe_view_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple var_mean_out(const Tensor & self, OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _weight_norm_interface_out(const Tensor & v, const Tensor & g, int64_t dim, Tensor & out0, Tensor & out1); // {"schema": "aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _weight_norm_interface_backward_out(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim, Tensor & out0, Tensor & out1); // {"schema": "aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & zeros_out(IntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _efficientzerotensor_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & zeros_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _standard_gamma_grad_out(const Tensor & self, const Tensor & output, Tensor & out); // {"schema": "aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _standard_gamma_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _dirichlet_grad_out(const Tensor & x, const Tensor & alpha, const Tensor & total, Tensor & out); // {"schema": "aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sample_dirichlet_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & poisson_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & binomial_out(const Tensor & count, const Tensor & prob, c10::optional generator, Tensor & out); // {"schema": "aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & native_norm_out(const Tensor & self, const Scalar & p, Tensor & out); // {"schema": "aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & native_norm_out(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_sum_out(const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_sum_backward_out(const Tensor & grad, const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_csr_sum_out(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_csr_prod_out(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self, Tensor & out); // {"schema": "aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_log_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_log_softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self, Tensor & out); // {"schema": "aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _spdiags_out(const Tensor & diagonals, const Tensor & offsets, IntArrayRef shape, c10::optional layout, Tensor & out); // {"schema": "aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const Scalar & p, Tensor & out); // {"schema": "aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clone_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & resize_as_out(const Tensor & self, const Tensor & the_template, c10::optional memory_format, const Tensor & out); // {"schema": "aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resize_as(const Tensor & self, const Tensor & the_template, c10::optional memory_format); // {"schema": "aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +const Tensor & resize_as_sparse_out(const Tensor & self, const Tensor & the_template, const Tensor & out); // {"schema": "aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resize_as_sparse(const Tensor & self, const Tensor & the_template); // {"schema": "aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & zero_out(const Tensor & self, Tensor & out); // {"schema": "aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor zero(const Tensor & self); // {"schema": "aten::zero(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sub_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rsub_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rsub_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sparse_coo_tensor_out(IntArrayRef size, Tensor & out); // {"schema": "aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_coo_tensor_with_dims_out(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, Tensor & out); // {"schema": "aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const Tensor & indices, const Tensor & values, c10::optional is_coalesced, Tensor & out); // {"schema": "aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & sparse_resize_out(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const Tensor & out); // {"schema": "aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor sparse_resize(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor", "dispatch": "True", "default": "True"} +const Tensor & sparse_resize_and_clear_out(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const Tensor & out); // {"schema": "aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor sparse_resize_and_clear(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sparse_mask_out(const Tensor & self, const Tensor & mask, Tensor & out); // {"schema": "aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_mask_projection_out(const Tensor & self, const Tensor & mask, bool accumulate_matches, Tensor & out); // {"schema": "aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_dense_out(const Tensor & self, c10::optional dtype, c10::optional masked_grad, Tensor & out); // {"schema": "aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _coalesce_out(const Tensor & self, Tensor & out); // {"schema": "aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _coalesced_out(const Tensor & self, bool coalesced, Tensor & out); // {"schema": "aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _coalesced(const Tensor & self, bool coalesced); // {"schema": "aten::_coalesced(Tensor self, bool coalesced) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copy_sparse_to_sparse_out(const Tensor & self, const Tensor & src, bool non_blocking, Tensor & out); // {"schema": "aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor copy_sparse_to_sparse(const Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_out(const Tensor & self, int64_t sparse_dim, Tensor & out); // {"schema": "aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_out(const Tensor & self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_csr_out(const Tensor & self, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_csc_out(const Tensor & self, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_bsr_out(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _to_sparse_bsc_out(const Tensor & self, IntArrayRef blocksize, c10::optional dense_dim, Tensor & out); // {"schema": "aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & to_mkldnn_out(const Tensor & self, c10::optional dtype, Tensor & out); // {"schema": "aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_reorder_conv2d_weight_out(const Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, OptionalSymIntArrayRef input_size, Tensor & out); // {"schema": "aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_reorder_conv3d_weight_out(const Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, Tensor & out); // {"schema": "aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantize_per_tensor_dynamic_out(const Tensor & self, ScalarType dtype, bool reduce_range, Tensor & out); // {"schema": "aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantize_per_tensor_out(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantize_per_tensor_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void quantize_per_tensor_out(TensorList tensors, const Tensor & scales, const Tensor & zero_points, ScalarType dtype, TensorList out); // {"schema": "aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & quantize_per_channel_out(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & dequantize_out(const Tensor & self, Tensor & out); // {"schema": "aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void dequantize_out(TensorList tensors, TensorList out); // {"schema": "aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & q_per_channel_scales_out(const Tensor & self, Tensor & out); // {"schema": "aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & q_per_channel_zero_points_out(const Tensor & self, Tensor & out); // {"schema": "aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & int_repr_out(const Tensor & self, Tensor & out); // {"schema": "aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _make_per_tensor_quantized_tensor_out(const Tensor & self, double scale, int64_t zero_point, Tensor & out); // {"schema": "aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _make_per_channel_quantized_tensor_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, Tensor & out); // {"schema": "aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple fake_quantize_per_tensor_affine_cachemask_out(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, const Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _fake_quantize_learnable_per_tensor_affine_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, Tensor & out); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple fake_quantize_per_channel_affine_cachemask_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _fake_quantize_learnable_per_channel_affine_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, Tensor & out); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _fused_moving_avg_obs_fq_helper_out(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, Tensor & out0, Tensor & out1); // {"schema": "aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))", "dispatch": "True", "default": "True"} +::std::tuple _fused_moving_avg_obs_fq_helper_functional(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, const Tensor & running_min, const Tensor & running_max, const Tensor & scale, const Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)", "dispatch": "True", "default": "True"} +Tensor & _to_copy_out(const Tensor & self, bool non_blocking, c10::optional memory_format, Tensor & out); // {"schema": "aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _lstm_mps_out(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4, Tensor & out5); // {"schema": "aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) 
out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!))", "dispatch": "True", "default": "True"} +void lstm_mps_backward_out(const c10::optional & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & z_state, const Tensor & cell_state_fwd, const Tensor & input, const Tensor & layersOutputs, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, Tensor & out0, TensorList out1, TensorList out2); // {"schema": "aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()", "dispatch": "True", "default": "True"} +::std::tuple _thnn_fused_lstm_cell_out(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const c10::optional & input_bias, const c10::optional & hidden_bias, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple _thnn_fused_lstm_cell_backward_impl_out(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple _thnn_fused_gru_cell_out(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias, Tensor & out0, Tensor & out1); // {"schema": "aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _thnn_fused_gru_cell_backward_out(const Tensor & grad_hy, const Tensor & workspace, bool has_bias, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"} +::std::tuple _pack_padded_sequence_out(const Tensor & input, const Tensor & lengths, bool batch_first, Tensor & out0, Tensor & out1); // {"schema": "aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & set_out(const Tensor & self, Storage source, Tensor & out); // {"schema": "aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor set(const Tensor & self, Storage source); // {"schema": "aten::set.source_Storage(Tensor self, Storage source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & set_out(const Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor set(const Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & set_out(const Tensor & self, const Tensor & source, Tensor & out); // {"schema": "aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor set(const Tensor & self, const Tensor & source); // {"schema": "aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & set_out(const Tensor & self, Tensor & out); // {"schema": "aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor set(const Tensor & self); // {"schema": "aten::set(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lift_out(const Tensor & self, Tensor & out); // {"schema": "aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lift_fresh_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & masked_fill_out(const Tensor & self, const Tensor & mask, const Scalar & value, Tensor & out); // {"schema": "aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & masked_fill_out(const Tensor & self, const Tensor & mask, const Tensor & value, Tensor & out); // {"schema": "aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & masked_scatter_out(const Tensor & self, const Tensor & mask, const Tensor & source, Tensor & out); // {"schema": "aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _masked_softmax_out(const Tensor & self, const Tensor & mask, c10::optional dim, c10::optional mask_type, Tensor & out); // {"schema": "aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _masked_softmax_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & mask, c10::optional dim, Tensor & out); // {"schema": "aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & put_out(const Tensor & self, const Tensor & index, const Tensor & source, bool accumulate, Tensor & out); // {"schema": "aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & index_fill_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, Tensor & out); // {"schema": "aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & index_fill_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value, Tensor & out); // {"schema": "aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_or_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_xor_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & __lshift___out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & __lshift___out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & __rshift___out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & __rshift___out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & random_out(const Tensor & self, int64_t from, c10::optional to, c10::optional generator, Tensor & out); // {"schema": "aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor random(const Tensor & self, int64_t from, c10::optional to, c10::optional generator); // {"schema": "aten::random.from(Tensor self, int from, int? to, *, Generator? 
generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & random_out(const Tensor & self, int64_t to, c10::optional generator, Tensor & out); // {"schema": "aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor random(const Tensor & self, int64_t to, c10::optional generator); // {"schema": "aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & random_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor random(const Tensor & self, c10::optional generator); // {"schema": "aten::random(Tensor self, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & uniform_out(const Tensor & self, double from, double to, c10::optional generator, Tensor & out); // {"schema": "aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor uniform(const Tensor & self, double from, double to, c10::optional generator); // {"schema": "aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cauchy_out(const Tensor & self, double median, double sigma, c10::optional generator, Tensor & out); // {"schema": "aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor cauchy(const Tensor & self, double median, double sigma, c10::optional generator); // {"schema": "aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log_normal_out(const Tensor & self, double mean, double std, c10::optional generator, Tensor & out); // {"schema": "aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor log_normal(const Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & exponential_out(const Tensor & self, double lambd, c10::optional generator, Tensor & out); // {"schema": "aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor exponential(const Tensor & self, double lambd, c10::optional generator); // {"schema": "aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & geometric_out(const Tensor & self, double p, c10::optional generator, Tensor & out); // {"schema": "aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor geometric(const Tensor & self, double p, c10::optional generator); // {"schema": "aten::geometric(Tensor self, float p, *, Generator? 
generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tril_indices_out(int64_t row, int64_t col, int64_t offset, Tensor & out); // {"schema": "aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & triu_indices_out(int64_t row, int64_t col, int64_t offset, Tensor & out); // {"schema": "aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & trace_out(const Tensor & self, Tensor & out); // {"schema": "aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _cholesky_solve_helper_out(const Tensor & self, const Tensor & A, bool upper, Tensor & out); // {"schema": "aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & dist_out(const Tensor & self, const Tensor & other, const Scalar & p, Tensor & out); // {"schema": "aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void _histogramdd_bin_edges_out(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density, TensorList out); // {"schema": "aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & _histogramdd_from_bin_cts_out(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density, Tensor & out); // {"schema": "aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _histogramdd_from_bin_tensors_out(const Tensor & self, TensorList bins, const c10::optional & weight, bool density, Tensor & out); // {"schema": "aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & remainder_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & argsort_out(const Tensor & self, bool stable, int64_t dim, bool descending, Tensor & out); // {"schema": "aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & unfold_backward_out(const Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, Tensor & out); // {"schema": "aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & normal_out(const Tensor & self, double mean, double std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void _amp_foreach_non_finite_check_and_unscale_out(TensorList self, Tensor & found_inf, const Tensor & inv_scale, TensorList out); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,Tensor> _amp_foreach_non_finite_check_and_unscale(TensorList self, const Tensor & found_inf, const Tensor & inv_scale); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)", "dispatch": "True", "default": "True"} +Tensor & _amp_update_scale_out(const Tensor & self, Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, Tensor & out); // {"schema": "aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _amp_update_scale(const Tensor & self, const Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); // {"schema": "aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)", "dispatch": "True", "default": "True"} +void _foreach_add_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_add_out(TensorList self, TensorList other, const Scalar & alpha, TensorList out); // {"schema": "aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_add_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_add_out(TensorList self, const Tensor & other, const Scalar & alpha, TensorList out); // {"schema": "aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sub_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sub_out(TensorList self, TensorList other, const Scalar & alpha, TensorList out); // {"schema": "aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sub_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_mul_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_mul_out(TensorList self, 
TensorList other, TensorList out); // {"schema": "aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_mul_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_mul_out(TensorList self, const Tensor & other, TensorList out); // {"schema": "aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_div_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_div_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_div_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_div_out(TensorList self, const Tensor & other, TensorList out); // {"schema": "aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_max_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_max_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_max_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_min_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_min_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_clamp_min_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_maximum_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_maximum_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_maximum_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) 
-> ()", "dispatch": "True", "default": "True"} +void _foreach_minimum_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_minimum_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_minimum_out(TensorList self, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcdiv_out(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value, TensorList out); // {"schema": "aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcdiv_out(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcdiv_out(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars, TensorList out); // {"schema": "aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcmul_out(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value, TensorList out); // {"schema": "aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcmul_out(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars, TensorList out); // {"schema": "aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_addcmul_out(TensorList self, TensorList tensor1, TensorList tensor2, const Tensor & scalars, TensorList out); // {"schema": "aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_abs_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_acos_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_asin_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_atan_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_ceil_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_cos_out(TensorList self, TensorList out); // {"schema": 
"aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_cosh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_erf_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_erfc_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_exp_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_expm1_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_floor_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_frac_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_lerp_out(TensorList self, TensorList tensors1, TensorList weights, TensorList out); // {"schema": "aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_lerp_out(TensorList self, TensorList tensors1, const Scalar & weight, TensorList out); // {"schema": "aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_lgamma_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_log_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_log10_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_log1p_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_log2_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_neg_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_norm_out(TensorList self, const Scalar & ord, TensorList out); // {"schema": "aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_pow_out(TensorList self, TensorList exponent, TensorList out); // {"schema": "aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_pow_out(TensorList self, const Scalar & exponent, TensorList out); // {"schema": 
"aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_pow_out(TensorList self, ArrayRef exponent, TensorList out); // {"schema": "aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_reciprocal_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_round_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sigmoid_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sign_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sin_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sinh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_sqrt_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_tan_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_tanh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_trunc_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_zero_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::vector _foreach_zero(TensorList self); // {"schema": "aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out", "dispatch": "True", "default": "True"} +void _foreach_copy_out(TensorList self, TensorList src, bool non_blocking, TensorList out); // {"schema": "aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::vector _foreach_copy(TensorList self, TensorList src, bool non_blocking); // {"schema": "aten::_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out", "dispatch": "True", "default": "True"} +Tensor & bucketize_out(const Scalar & self, const Tensor & boundaries, bool out_int32, bool right, Tensor & out); // {"schema": "aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_jvp_out(const Tensor & glu, const Tensor & x, const Tensor & dx, int64_t dim, Tensor & out); // {"schema": "aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_backward_jvp_out(const Tensor & grad_x, const Tensor & grad_glu, const Tensor & x, const Tensor & dgrad_glu, const Tensor & dx, int64_t dim, Tensor & out); // {"schema": "aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hardswish_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rrelu_with_noise_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, bool self_is_result, Tensor & out); // {"schema": "aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_adaptive_avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool2d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool3d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _slow_conv2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & conv_depthwise3d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slow_conv_dilated2d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slow_conv_dilated3d_out(const Tensor & self, const Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & isinf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & linalg_matrix_exp_out(const Tensor & self, Tensor & out); // {"schema": "aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_optional_intlist_out(const Tensor & values, OptionalIntArrayRef addends, Tensor & out); // {"schema": "aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_optional_filled_intlist_out(const Tensor & values, OptionalIntArrayRef addends, Tensor & out); // {"schema": "aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_optional_floatlist_out(const Tensor & values, c10::optional> addends, Tensor & out); // {"schema": "aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_warn_in_autograd_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_autograd_multiple_dispatch_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _test_autograd_multiple_dispatch_view_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & segment_reduce_out(const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial, Tensor & out); // {"schema": "aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _segment_reduce_backward_out(const Tensor & grad, const Tensor & output, const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial, Tensor & out); // {"schema": "aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_from_tensor_list_out(TensorList list, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, Tensor & out); // {"schema": "aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _fw_primal_copy_out(const Tensor & self, int64_t level, Tensor & out); // {"schema": "aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _make_dual_copy_out(const Tensor & primal, const Tensor & tangent, int64_t level, Tensor & out); // {"schema": "aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_as_real_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_as_complex_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _conj_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _neg_view_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & as_strided_copy_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, Tensor & out); // {"schema": "aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_broadcast_to_copy_out(const Tensor & self, IntArrayRef size, Tensor & out); // {"schema": "aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_copy_out(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & expand_copy_out(const Tensor & self, c10::SymIntArrayRef size, bool implicit, Tensor & out); // {"schema": "aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & permute_copy_out(const Tensor & self, IntArrayRef dims, Tensor & out); // {"schema": "aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _reshape_alias_copy_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & select_copy_out(const Tensor & self, int64_t dim, c10::SymInt index, Tensor & out); // {"schema": "aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & detach_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_copy_out(const Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_copy_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_copy_out(const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & t_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & transpose_copy_out(const Tensor & self, int64_t dim0, int64_t dim1, Tensor & out); // {"schema": "aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & unsqueeze_copy_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _values_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & values_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & crow_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & col_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::col_indices_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ccol_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & row_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_copy_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_copy_out(const Tensor & self, ScalarType dtype, Tensor & out); // {"schema": "aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & unfold_copy_out(const Tensor & self, int64_t dimension, int64_t size, int64_t step, Tensor & out); // {"schema": "aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & alias_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & to_padded_tensor_out(const Tensor & self, double padding, OptionalSymIntArrayRef output_size, Tensor & out); // {"schema": "aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _transformer_encoder_layer_fwd_out(const Tensor & src, int64_t embed_dim, int64_t num_heads, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const Tensor & norm_weight_1, const Tensor & norm_bias_1, const Tensor & norm_weight_2, const Tensor & norm_bias_2, const Tensor & ffn_weight_1, const Tensor & ffn_bias_1, const Tensor & ffn_weight_2, const Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type, Tensor & out); // {"schema": "aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _native_multi_head_attention_out(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask, bool need_weights, bool average_attn_weights, c10::optional mask_type, Tensor & out0, Tensor & out1); // {"schema": "aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _triton_scaled_dot_attention_out(const Tensor & q, const Tensor & k, const Tensor & v, double dropout_p, Tensor & out); // {"schema": "aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _triton_multi_head_attention_out(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask, Tensor & out); // {"schema": "aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _foobar_out(const Tensor & self, bool arg1, bool arg2, bool arg3, Tensor & out); // {"schema": "aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void _fused_adam_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"} +void _fused_adam_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"} +void _fused_adamw_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"} +void _fused_adamw_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, const Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"} +void _fused_sgd_out(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)", "dispatch": "True", "default": "True"} +void _fused_sgd_out(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, const Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf, TensorList out); // {"schema": "aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd(TensorList self, TensorList grads, TensorList momentum_buffer_list, double weight_decay, double momentum, const Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)", "dispatch": "True", "default": "True"} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ScalarType.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ScalarType.h new file mode 100644 index 0000000000000000000000000000000000000000..2181250740e23808f06e63660f50ca887169bcb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ScalarType.h @@ -0,0 +1,4 @@ +#pragma once +#include // for BC reasons +#include +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h b/venv/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h new file mode 100644 index 0000000000000000000000000000000000000000..41b7b97cf6abbdcf987c020e14b09a64f7729bfc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include + +// A simple thread local enumeration, used to link forward and backward pass +// ops and is used by autograd and observers framework +namespace at::sequence_number { + +TORCH_API uint64_t peek(); +TORCH_API uint64_t get_and_increment(); + +} // namespace at::sequence_number diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorUtils.h b/venv/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..83ef0c553c2d7dbe043b699b8ff721f08f432be2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorUtils.h @@ -0,0 +1,411 @@ +#pragma once + +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#include +#include +#else +#include +#include +#endif + +#define AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(LAYOUT, NAME, ...) 
\ + [&] { \ + const auto& the_layout = LAYOUT; \ + switch (the_layout) { \ + case kSparseCsr: \ + case kSparseCsc: \ + case kSparseBsr: \ + case kSparseBsc: \ + return __VA_ARGS__(); \ + default: \ + AT_ERROR( \ + NAME, \ + " expected sparse compressed tensor layout but got ", \ + the_layout); \ + } \ + }() + +#define AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( \ + LAYOUT, NAME, ROW_DIM_ACTION, COLUMN_DIM_ACTION) \ + [&]() { \ + const auto& the_layout = LAYOUT; \ + switch (the_layout) { \ + case kSparseCsr: \ + case kSparseBsr: \ + return (ROW_DIM_ACTION)(); \ + case kSparseCsc: \ + case kSparseBsc: \ + return (COLUMN_DIM_ACTION)(); \ + default: \ + AT_ERROR( \ + NAME, \ + " expected sparse compressed tensor layout but got ", \ + the_layout); \ + } \ + }() + +#define AT_DISPATCH_PLAIN_SPARSE_COMPRESSED_LAYOUTS( \ + LAYOUT, NAME, NO_BLOCK_ACTION, BLOCK_ACTION) \ + [&]() { \ + const auto& the_layout = LAYOUT; \ + switch (the_layout) { \ + case kSparseCsr: \ + case kSparseCsc: \ + return (NO_BLOCK_ACTION)(); \ + case kSparseBsr: \ + case kSparseBsc: \ + return (BLOCK_ACTION)(); \ + default: \ + AT_ERROR( \ + NAME, \ + " expected sparse compressed tensor layout but got ", \ + the_layout); \ + } \ + }() + +#define AT_DISPATCH_SPARSE_ROW_COMPRESSED_LAYOUTS( \ + LAYOUT, NAME, ROW_DIM_ACTION) \ + [&]() { \ + const auto& the_layout = LAYOUT; \ + switch (the_layout) { \ + case kSparseCsr: \ + case kSparseBsr: \ + return (ROW_DIM_ACTION)(); \ + default: \ + AT_ERROR( \ + NAME, \ + " expected sparse row compressed tensor layout but got ", \ + the_layout); \ + } \ + }() + +#define AT_DISPATCH_SPARSE_COL_COMPRESSED_LAYOUTS( \ + LAYOUT, NAME, COL_DIM_ACTION) \ + [&]() { \ + const auto& the_layout = LAYOUT; \ + switch (the_layout) { \ + case kSparseCsc: \ + case kSparseBsc: \ + return (COL_DIM_ACTION)(); \ + default: \ + AT_ERROR( \ + NAME, \ + " expected sparse column compressed tensor layout but got ", \ + the_layout); \ + } \ + }() + +#define AT_DISPATCH_SPARSE_COMPRESSED_NONBLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \ + [&]() { \ + const auto& the_layout = LAYOUT; \ + switch (the_layout) { \ + case kSparseCsr: \ + case kSparseCsc: \ + return (ACTION)(); \ + default: \ + AT_ERROR( \ + NAME, \ + " expected sparse compressed (non-block) tensor layout but got ", \ + the_layout); \ + } \ + }() + +#define AT_DISPATCH_SPARSE_COMPRESSED_BLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \ + [&]() { \ + const auto& the_layout = LAYOUT; \ + switch (the_layout) { \ + case kSparseBsr: \ + case kSparseBsc: \ + return (ACTION)(); \ + default: \ + AT_ERROR( \ + NAME, \ + " expected sparse compressed block tensor layout but got ", \ + the_layout); \ + } \ + }() + +#define AT_DISPATCH_SPARSE_VALUE_TYPES(TYPE, NAME, ...) 
\ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \ + kComplexHalf, kHalf, kBool, kBFloat16, __VA_ARGS__)) + +namespace at::sparse_csr { + +using SparseCsrTensor = Tensor; + +inline bool is_sparse_compressed(const Layout& layout) { + switch (layout) { + case kSparseCsr: + case kSparseCsc: + case kSparseBsr: + case kSparseBsc: + return true; + default:; + } + return false; +} + +inline bool is_sparse_compressed(const Tensor& self) { + return is_sparse_compressed(self.layout()); +} + +inline SparseCsrTensorImpl* get_sparse_csr_impl(const SparseCsrTensor& self) { + AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS( + self.layout(), "get_sparse_csr_impl", [&] {}); + return static_cast(self.unsafeGetTensorImpl()); +} + +inline std::string layoutToString( + Layout layout, + bool upper = false, + bool lower = false) { + switch (layout) { + case kSparseCsr: + return (upper ? "CSR" : (lower ? "csr" : "Csr")); + case kSparseCsc: + return (upper ? "CSC" : (lower ? "csc" : "Csc")); + case kSparseBsr: + return (upper ? "BSR" : (lower ? "bsr" : "Bsr")); + case kSparseBsc: + return (upper ? "BSC" : (lower ? "bsc" : "Bsc")); + default: + TORCH_CHECK(false, "Not a sparse compressed layout:", layout); + return ""; + } +} + +inline bool isCompressedRow(Layout layout) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + layout, "isCompressedRow", [&] { return true; }, [&] { return false; }); +} + +inline bool isCompressedColumn(Layout layout) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + layout, + "isCompressedColumn", + [&] { return false; }, + [&] { return true; }); +} + +inline std::string compressedIndicesName(Layout layout) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + layout, + "compressedIndicesName", + [&] { return "crow_indices"; }, + [&] { return "ccol_indices"; }); +} + +inline std::string plainIndicesName(Layout layout) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + layout, + "plainIndicesName", + [&] { return "col_indices"; }, + [&] { return "row_indices"; }); +} + +inline std::string compressedDimName(Layout layout) { + switch (layout) { + case kSparseCsr: + return "row"; + case kSparseCsc: + return "column"; + case kSparseBsr: + return "row block"; + case kSparseBsc: + return "column block"; + default: + TORCH_CHECK(false, "Not a sparse compressed layout:", layout); + return ""; + } +} + +inline std::string plainDimName(Layout layout) { + switch (layout) { + case kSparseCsr: + return "column"; + case kSparseCsc: + return "row"; + case kSparseBsr: + return "column block"; + case kSparseBsc: + return "row block"; + default: + TORCH_CHECK(false, "Not a sparse compressed layout:", layout); + return ""; + } +} + +inline size_t rowDimension(Layout layout, IntArrayRef size) { + return size.size() - (isCompressedRow(layout) ? 2 : 1); +} + +inline size_t columnDimension(Layout layout, IntArrayRef size) { + return size.size() - (isCompressedColumn(layout) ? 2 : 1); +} + +inline size_t compressedDimension( + Layout layout, + IntArrayRef size, + size_t dense_ndim = 0) { + return size.size() - dense_ndim - (isCompressedRow(layout) ? 2 : 1); +} + +inline size_t plainDimension( + Layout layout, + IntArrayRef size, + size_t dense_ndim = 0) { + return size.size() - dense_ndim - (isCompressedRow(layout) ? 
1 : 2); +} + +inline int64_t numBatchDimensions(Tensor const& self) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + self.layout(), + "numBatchDimensions", + [&self] { return self.crow_indices().dim() - 1; }, + [&self] { return self.ccol_indices().dim() - 1; }); +} + +inline std::pair getCompressedPlainIndices(Tensor const& self) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + self.layout(), + "getCompressedPlainIndices", + [&self] { + return std::make_pair(self.crow_indices(), self.col_indices()); + }, + [&self] { + return std::make_pair(self.ccol_indices(), self.row_indices()); + }); +} + +inline Layout flip_compressed_layout(Layout layout) { + switch (layout) { + case kSparseCsr: + return kSparseCsc; + case kSparseCsc: + return kSparseCsr; + case kSparseBsr: + return kSparseBsc; + case kSparseBsc: + return kSparseBsr; + default: + TORCH_CHECK(false, "Not a sparse compressed layout:", layout); + return kSparseCsr; + } +} + +inline DimVector getBlockSize(Tensor const& self) { + int64_t n_batch = numBatchDimensions(self); + return at::DimVector(self.values().sizes().slice(n_batch + 1, 2)); +} + +inline at::OptionalArray getSymIntBlockSize(Tensor const& self) { + if (self.layout() == at::kSparseBsr || self.layout() == at::kSparseBsc) { + int64_t n_batch = numBatchDimensions(self); + return self.values().sym_sizes().slice(n_batch + 1, 2).vec(); + } else { + return {}; + } +} + +template +inline bool only_sparse_compressed_binary_op_trivial_cases( + const Tensor& self, + const Tensor& other, + const Scalar& alpha, + Tensor& out, + const binary_op_t& binary_op, + const binary_op_out_t& binary_op_out) { + // Only sparse compressed! Just like the name says :) + TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(self)); + TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(other)); + TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(out)); + + // Bypass BLAS if there are matches in (self, other, out) + if (self.is_same(out) && self.is_same(other)) { + binary_op_out(self.values(), other.values(), alpha); + return true; + } + if (self.is_same(other)) { + auto [compressed_indices, plain_indices] = + at::sparse_csr::getCompressedPlainIndices(self); + static_cast(out.unsafeGetTensorImpl()) + ->set_member_tensors( + compressed_indices, + plain_indices, + binary_op(self.values(), other.values(), alpha), + self.sizes()); + return true; + } + return false; +} + +inline bool only_sparse_compressed_add_trivial_cases( + const Tensor& self, + const Tensor& other, + const Scalar& alpha, + Tensor& out) { + return only_sparse_compressed_binary_op_trivial_cases( + self, + other, + alpha, + out, + [](const Tensor& v1, const Tensor& v2, const Scalar& alpha) { + return v1.add(v2, alpha); + }, + [](const Tensor& v1, const Tensor& v2, const Scalar& alpha) { + return v1.add_(v2, alpha); + }); +} + +inline Tensor to_type(const Tensor& input, ScalarType dtype) { + auto [compressed_indices, plain_indices] = + at::sparse_csr::getCompressedPlainIndices(input); + return at::_sparse_compressed_tensor_unsafe( + compressed_indices, + plain_indices, + std::move(input.values()).to(dtype), + input.sizes(), + dtype, + input.layout(), + input.device(), + input.options().pinned_memory_opt()); +} + +template +inline std::tuple create_acc_buffer( + TensorOptions option, + ScalarType type, + int64_t nnz = -1) { + Tensor new_values, new_values_acc; + constexpr bool need_acc = !std::is_same_v; + bool is_integral = at::isIntegralType(type, /*includeBool=*/true); + if constexpr (need_acc) { + auto 
acc_dtype = CppTypeToScalarType::value; + new_values_acc = at::empty({}, option.dtype(acc_dtype)); + new_values = is_integral ? new_values_acc : at::empty({}, option); + } else { + new_values = new_values_acc = at::empty({}, option); + } + if (nnz != -1) { + return std::make_tuple( + new_values.resize_(nnz), new_values_acc.resize_(nnz)); + } else { + return std::make_tuple(new_values, new_values_acc); + } +} + +inline void copy_from_acc_buffer(Tensor& new_values, Tensor& new_values_acc) { + if (!new_values_acc.is_same(new_values)) { + new_values.copy_(new_values_acc); + } +} + +} // namespace at::sparse_csr diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Storage.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Storage.h new file mode 100644 index 0000000000000000000000000000000000000000..5d6285281f23ec9adf7d916d40d743283980e053 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Storage.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Tensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..0b3719cca3bf1ff7154625c510c8292dd47444a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Tensor.h @@ -0,0 +1,3 @@ +#pragma once + +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h new file mode 100644 index 0000000000000000000000000000000000000000..528ed7b8762be5f681c759a5ce8a90aa8d4225d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h b/venv/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h new file mode 100644 index 0000000000000000000000000000000000000000..41f14a15ba99c2bb2eb81aeaadbd0b08ac086c4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h @@ -0,0 +1,144 @@ +#pragma once + +#include +#include + +namespace at { + +// Return if the tensor geometry represented by `sizes` and `strides` is +// contiguous Although we cache is_contiguous in tensor now, this is till useful +// because it allows checking if a particular geometry is contiguous without +// explicitly constructing a tensor, e.g., when you want to choose a kernel +// strategy based on whether a subgeometry is contiguous. 
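+// For example (values below are illustrative only; row-major strides for a
+// {2, 3, 4} geometry are {12, 4, 1}):
+//   at::geometry_is_contiguous({2, 3, 4}, {12, 4, 1});  // expected: true
+//   at::geometry_is_contiguous({2, 3, 4}, {1, 2, 6});   // expected: false (column-major strides)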
+TORCH_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides); + +struct TORCH_API TensorGeometry { + TensorGeometry() = default; + + explicit TensorGeometry(c10::SymIntArrayRef sizes) + : sizes_(sizes.vec()), + strides_(sizes.size()), + has_symbolic_sizes_strides_( + !c10::asIntArrayRefSlowOpt(sizes).has_value()) { + int64_t dim = static_cast(sizes.size()); + c10::SymInt expected_stride = 1; + for (int64_t i = dim - 1; i >= 0; i--) { + strides_[i] = expected_stride; + expected_stride *= sizes_[i]; + } + numel_ = expected_stride; + } + + explicit TensorGeometry(const TensorBase& t) + : sizes_(t.sym_sizes().vec()), + strides_(t.sym_strides().vec()), + storage_offset_(t.sym_storage_offset()), + numel_(t.sym_numel()), + has_symbolic_sizes_strides_( + t.unsafeGetTensorImpl()->has_symbolic_sizes_strides()) {} + + // true if the tensor is contiguous + bool is_contiguous() const; + + int64_t dim() const { + return static_cast(sizes_.size()); + } + + int64_t size(int64_t dim) const { + TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_); + dim = c10::maybe_wrap_dim(dim, this->dim()); + return sizes_.at(static_cast(dim)).as_int_unchecked(); + } + c10::IntArrayRef sizes() const { + TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_); + return c10::asIntArrayRefUnchecked(sizes_); + } + int64_t stride(int64_t dim) const { + TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_); + dim = c10::maybe_wrap_dim(dim, this->dim()); + return strides_.at(static_cast(dim)).as_int_unchecked(); + } + c10::IntArrayRef strides() const { + TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_); + return c10::asIntArrayRefUnchecked(strides_); + } + int64_t storage_offset() const { + TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_); + return storage_offset_.as_int_unchecked(); + } + int64_t numel() const { + TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_); + return numel_.as_int_unchecked(); + } + + c10::SymInt sym_size(int64_t dim) const { + dim = c10::maybe_wrap_dim(dim, this->dim()); + return sizes_.at(static_cast(dim)); + } + c10::SymIntArrayRef sym_sizes() const { + return sizes_; + } + c10::SymInt sym_stride(int64_t dim) const { + dim = c10::maybe_wrap_dim(dim, this->dim()); + return strides_.at(static_cast(dim)); + } + c10::SymIntArrayRef sym_strides() const { + return strides_; + } + c10::SymInt sym_storage_offset() const { + return storage_offset_; + } + c10::SymInt sym_numel() const { + return numel_; + } + + TensorGeometry transpose(int64_t dim0, int64_t dim1) { + TensorGeometry r = *this; // copy + TORCH_CHECK( + dim0 < dim(), + "transpose: dim0=", + dim0, + " out of range (dim=", + dim(), + ")") + TORCH_CHECK( + dim1 < dim(), + "transpose: dim1=", + dim1, + " out of range (dim=", + dim(), + ")") + std::swap(r.sizes_[dim0], r.sizes_[dim1]); + std::swap(r.strides_[dim0], r.strides_[dim1]); + return r; + } + + std::vector& mutable_sizes() { + return sizes_; + } + std::vector& mutable_strides() { + return strides_; + } + c10::SymInt& mutable_storage_offset() { + return storage_offset_; + } + void recompute() { + // recalculate numel after a change + c10::SymInt numel = 1; + for (const auto& i : sizes_) { + numel = numel * i; + } + numel_ = std::move(numel); + has_symbolic_sizes_strides_ = + !c10::asIntArrayRefSlowOpt(sizes_).has_value(); + } + + private: + std::vector sizes_; + std::vector strides_; + c10::SymInt storage_offset_; + c10::SymInt numel_; + bool has_symbolic_sizes_strides_{false}; +}; + +} // namespace at diff --git 
a/venv/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h new file mode 100644 index 0000000000000000000000000000000000000000..8c5003a676d80fea79e7facab42a2818d9e2aa74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h @@ -0,0 +1,137 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { + +class Tensor; + +namespace impl { + +// Use this to define the prototype for a meta function. There are two +// versions; one that takes one argument (just the operator name), or FUNC2 +// variant that takes two arguments (operator name and overload name). +// +// Example usage: +// +// TORCH_META_FUNC2(add, Tensor) ( +// const Tensor& self, const Tensor& other +// ) { +// ... compute sizes and options ... +// set_output(sizes, options); +// } +// +#define TORCH_META_FUNC(name) void structured_##name::meta +#define TORCH_META_FUNC2(name, overload) \ + void structured_##name##_##overload::meta + +// These are versions of TORCH_META_FUNC(2) that include a precompute_out struct +// as a return value. They should be used when the kernel in question has +// precomputed values declared in native_functions.yaml and the corresponding +// implementation should return an instance of the aforementioned struct. +#define TORCH_PRECOMPUTE_META_FUNC(name) \ + structured_##name::meta_return_ty structured_##name::meta +#define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \ + structured_##name##_##overload::meta_return_ty \ + structured_##name##_##overload::meta + +// Use this to create a precompute struct in a meta function. +#define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<> +#define TORCH_PRECOMPUTE_STRUCT2(name, overload) \ + structured_##name##_##overload::precompute_out<> + +// Use this to define the prototype for an implementation. This takes only +// one argument, which is the name of the dispatch key entry you're +// implementing. +// +// Example usage: +// +// TORCH_IMPL_FUNC(add_cpu) ( +// Tensor& result, const Tensor& self, const Tensor& other +// ) { +// ... do the actual implementation ... +// } +// +#define TORCH_IMPL_FUNC(name) void structured_##name::impl + +// Base class for all structured kernel classes. The set_output virtual +// method is varied depending whether or not the operator is +// functional/out/inplace, and could also be specialized for CPU/CUDA/etc +// (although presently it isn't). +// +// A notable subclass of this interface is TensorIteratorBase. +struct TORCH_API MetaBase { + MetaBase() = default; + MetaBase(const MetaBase&) = default; + MetaBase& operator=(const MetaBase&) = default; + MetaBase(MetaBase&&) noexcept = default; + MetaBase& operator=(MetaBase&&) noexcept = default; + virtual const Tensor& maybe_get_output(int64_t output_idx) = 0; + + // Note: [set_output_*] + // See: https://github.com/pytorch/pytorch/issues/69813 + // Whenever defining the output properties in the META function of a + // structured kernel (what was usually done with `set_output`), use one of + // these 3 variants, instead. In order to decide which variant to use, check + // the following decision tree: + // + // - Can the kernel you are going to implement support output tensors + // with arbitrary strides? + // | + // -- YES: `set_output_raw_strided` + // | + // -- NO: Should the output tensor strides be contiguous? 
+ // | + // -- YES: `set_output_contiguous` + // | + // -- NO: `set_output_strided` + // + // Use this function whenever the kernel requires specific strides for the + // output. If `strides` does not match the given output strides, proxy outputs + // will be created and passed to the IMPL function. + virtual void set_output_strided( + int64_t output_idx, + IntArrayRef sizes, + IntArrayRef strides, + TensorOptions options, + DimnameList names = {}) { + TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented."); + } + + // Use this function whenever the kernel knows how to handle arbitrary strided + // outputs. This function has the same behavior as the old `set_output`: it + // will only re-stride if the given output was resized. + virtual void set_output_raw_strided( + int64_t output_idx, + IntArrayRef sizes, + IntArrayRef strides_hint, + TensorOptions options, + DimnameList names = {}) { + TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented."); + } + + // Use this function if the kernel requires contiguous strides. + // Alias for `set_output_strided`, but with contiguous strides. + void set_output_contiguous( + int64_t output_idx, + IntArrayRef sizes, + TensorOptions options, + DimnameList names = {}) { + auto strides = c10::contiguous_strides(sizes); + set_output_strided(output_idx, sizes, strides, options, names); + } + + // Returns a reference to an undefined tensor if there is no presupplied + // output + const Tensor& maybe_get_output() { + return maybe_get_output(0); + } + virtual ~MetaBase() = default; +}; + +} // namespace impl + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalState.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalState.h new file mode 100644 index 0000000000000000000000000000000000000000..8419499c3a563c1001a13685278523a64495c3db --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalState.h @@ -0,0 +1,113 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +// Thread local state contains values that are preserved across +// thread boundaries (e.g. at::launch/JIT fork, autograd). +// Note at::parallel_for doesn't preserve TLS across thread boundaries. +class TORCH_API ThreadLocalState { + public: + // Saves the thread local variables' values and + // returns them as a ThreadLocalState + ThreadLocalState(); + + // set_grad_mode - force the value of the grad mode TLS in + // the current state object. This is used for example in the + // autograd engine. + void set_grad_mode(bool enabled); + + // set_multithreading_enabled - force the value of the multithreadinmaximum + // threads TLS in + // the current state object. This is used for example in the + // autograd engine. 
+ void set_multithreading_enabled(bool enabled); + + // Sets thread local variables in the current thread, + // according to the thread boundary specified + static void setThreadLocalState(const ThreadLocalState& state); + + private: + c10::impl::LocalDispatchKeySet dispatch_key_; + + // ThreadLocalDebugInfo does not change after being created + // with DebugInfoGuard + std::shared_ptr debug_info_; + + // RecordFunction TLS + RecordFunctionTLS rf_tls_; + + // TLS for out-of-tree functorch + // See NOTE [functorch TLS in pytorch/pytorch] for why this needs to be a + // pointer (spoiler alert: it's due to the indirection) + // This needs to be a shared_ptr instead of a unique_ptr because + // ThreadLocalState is copy-able and does indeed get copied. Maybe we can + // consider adding an explicit copy constructor for ThreadLocalState in the + // future but I didn't want to add one just for this. + std::shared_ptr functorch_tls_; + + // TLS for AutogradModes + AutogradState autograd_tls_; + + // TLS for enable_torch_dispatch_mode + c10::impl::TorchDispatchModeTLS torch_dispatch_mode_state_; + + // TLS for enable_python_dispatcher + c10::impl::PyInterpreter* python_dispatcher_state_; + + // TLS for __torch_function__ (mode and disable_torch_function) + at::impl::PythonTorchFunctionTLS python_torch_function_state_; + + // TLS for saved tensors default hooks + at::impl::SavedTensorDefaultHooksTLS saved_tensors_default_hooks_state_; + + bool functionalization_reapply_views_state_; + + // TLS for arbitrary python objects that is registered via hooks + at::impl::ThreadLocalPythonObjects saved_objects_; + + friend class ThreadLocalStateGuard; +}; + +// Guard to set and reset the thread local state +class TORCH_API ThreadLocalStateGuard { + public: + explicit ThreadLocalStateGuard(const ThreadLocalState& state) + : prev_state_(ThreadLocalState()) { + // set the given state across the thread boundary + ThreadLocalState::setThreadLocalState(state); + } + + ~ThreadLocalStateGuard() { + // restore previously set variables + ThreadLocalState::setThreadLocalState(prev_state_); + } + + private: + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const ThreadLocalState prev_state_; +}; + +template +auto wrapPropagateTLSState(T callback) { + return [tls_state = ThreadLocalState(), + callback = std::move(callback)](auto&&... args) { + ThreadLocalStateGuard g(tls_state); + // Propagate value returned by callback(). + return callback(std::forward(args)...); + }; +} + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/Utils.h b/venv/lib/python3.10/site-packages/torch/include/ATen/Utils.h new file mode 100644 index 0000000000000000000000000000000000000000..17826b332afbcf9f2e2a328ed3a938cc6fecce74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/Utils.h @@ -0,0 +1,138 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define AT_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + void operator=(const TypeName&) = delete + +namespace at { + +TORCH_API int _crash_if_asan(int); + +// Converts a TensorList (i.e. ArrayRef to vector of TensorImpl*) +// NB: This is ONLY used by legacy TH bindings, and ONLY used by cat. +// Once cat is ported entirely to ATen this can be deleted! 
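+// Illustrative call only (the argument values below are made-up examples):
+//   auto impls = checked_dense_tensor_list_unwrap(
+//       tensors, "tensors", /*pos=*/1, kCPU, kFloat);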
+static inline std::vector checked_dense_tensor_list_unwrap( + ArrayRef tensors, + const char* name, + int pos, + c10::DeviceType device_type, + ScalarType scalar_type) { + std::vector unwrapped; + unwrapped.reserve(tensors.size()); + for (const auto i : c10::irange(tensors.size())) { + const auto& expr = tensors[i]; + if (expr.layout() != Layout::Strided) { + AT_ERROR( + "Expected dense tensor but got ", + expr.layout(), + " for sequence element ", + i, + " in sequence argument at position #", + pos, + " '", + name, + "'"); + } + if (expr.device().type() != device_type) { + AT_ERROR( + "Expected object of device type ", + device_type, + " but got device type ", + expr.device().type(), + " for sequence element ", + i, + " in sequence argument at position #", + pos, + " '", + name, + "'"); + } + if (expr.scalar_type() != scalar_type) { + AT_ERROR( + "Expected object of scalar type ", + scalar_type, + " but got scalar type ", + expr.scalar_type(), + " for sequence element ", + i, + " in sequence argument at position #", + pos, + " '", + name, + "'"); + } + unwrapped.emplace_back(expr.unsafeGetTensorImpl()); + } + return unwrapped; +} + +template +std::array check_intlist( + ArrayRef list, + const char* name, + int pos) { + if (list.empty()) { + // TODO: is this necessary? We used to treat nullptr-vs-not in IntList + // differently with strides as a way of faking optional. + list = {}; + } + auto res = std::array(); + if (list.size() == 1 && N > 1) { + res.fill(list[0]); + return res; + } + if (list.size() != N) { + AT_ERROR( + "Expected a list of ", + N, + " ints but got ", + list.size(), + " for argument #", + pos, + " '", + name, + "'"); + } + std::copy_n(list.begin(), N, res.begin()); + return res; +} + +using at::detail::check_size_nonnegative; + +namespace detail { + +template +TORCH_API Tensor tensor_cpu(ArrayRef values, const TensorOptions& options); + +template +TORCH_API Tensor +tensor_backend(ArrayRef values, const TensorOptions& options); + +template +TORCH_API Tensor +tensor_complex_cpu(ArrayRef values, const TensorOptions& options); + +template +TORCH_API Tensor +tensor_complex_backend(ArrayRef values, const TensorOptions& options); +} // namespace detail + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h new file mode 100644 index 0000000000000000000000000000000000000000..37d67b232a22c11fa7dccf638b7897c0854ab8bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h @@ -0,0 +1,24 @@ +#pragma once +#include +#include + +namespace at { + +/** + Computes ceil(a / b) +*/ +template >> +C10_ALWAYS_INLINE C10_HOST_DEVICE T ceil_div(T a, T b) { + return (a + b - 1) / b; +} + +/** + Computes ceil(a / b) * b; i.e., rounds up `a` to the next highest + multiple of b +*/ +template +C10_ALWAYS_INLINE C10_HOST_DEVICE T round_up(T a, T b) { + return ceil_div(a, b) * b; +} + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/code_template.h b/venv/lib/python3.10/site-packages/torch/include/ATen/code_template.h new file mode 100644 index 0000000000000000000000000000000000000000..393e322e6fe66c8f6d11db9c968800ed5134b61c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/code_template.h @@ -0,0 +1,243 @@ +#pragma once + +#include + +#include +#include +#include +#include + +namespace at::jit { + +// A template environment is a mapping from template variable names, e.g., +// identifier (corresponding to 
$identifier) to their expansions. +// +// This template environment supports storing strings, numbers and lists +// of strings, and can be chained together (so that lookup proceeds in +// in the top level environment, and then recurses into a parent +// environment if the key is not found.) +struct TemplateEnv { + TemplateEnv() = default; + TemplateEnv(TemplateEnv& parent) : parent(&parent) {} + + using string_list = std::vector; + + // Add a string 'v' to the map at key 'k'. + void s(const std::string& k, const std::string& v) { + strings_[k] = v; + lists_.erase(k); + } + + // Add a number 'v' to the map at key 'k' + template + void d(const std::string& k, const T& v) { + strings_[k] = c10::to_string(v); + lists_.erase(k); + } + + // Retrieve the string representation of the value stored at 'k' from the map. + // Raises an exception if the key is not found. + const std::string& s(const std::string& k) const { + if (strings_.count(k) == 0) { + if (parent) { + return parent->s(k); + } + notFound(k); + } + return strings_.at(k); + } + + // Store a list of strings 'v' in the map at 'k'. + void v(const std::string& k, const string_list& v) { + lists_[k] = v; + strings_.erase(k); + } + + // Retrieve a list of strings stored at 'k' from the map. + // Raises an exception if the key is not found. + const string_list& v(const std::string& k) const { + if (lists_.count(k) == 0) { + if (parent) { + return parent->v(k); + } + notFound(k); + } + return lists_.at(k); + } + + // Test if a string 'k' is a string (as opposed to a list.) + bool keyIsString(const std::string& k) const { + if (strings_.count(k) > 0) + return true; + if (lists_.count(k) > 0) + return false; + if (parent) + return parent->keyIsString(k); + notFound(k); + } + + private: + [[noreturn]] void notFound(const std::string& k) const { + std::stringstream ss; + ss << "key not found: " << k; + throw std::logic_error(ss.str()); + } + + std::unordered_map strings_; + std::unordered_map lists_; + TemplateEnv* parent{nullptr}; +}; + +/* +# Match $identifier or ${identifier} and replace with the value in env. +# If this identifier is at the beginning of whitespace on a line +# and its value is a list then it is treated as +# block substitution by indenting all lines of all elements. +# If the identifier is on a line starting with non-whitespace and a list +# then it is comma separated. ${,foo} will insert a comma before the list +# if this list is not empty and ${foo,} will insert one after. 
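+# Example: with env.s("name", "relu") and env.v("args", {"const Tensor& self"}),
+# the template "Tensor ${name}(${args});" formats to "Tensor relu(const Tensor& self);".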
+*/ +struct CodeTemplate { + /* implicit */ CodeTemplate(std::string t) : template_text(std::move(t)) {} + + std::string format(const TemplateEnv& env) const { + std::stringstream out; + size_t pos = 0; + size_t indent = 0; + bool all_whitespace = true; + while (pos < template_text.size()) { + char c = template_text[pos]; + if (c == '$') { + std::stringstream kss; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + bool comma_before; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + bool comma_after; + size_t new_pos = parseKey(pos, kss, comma_before, comma_after); + std::string k = kss.str(); + bool is_string = env.keyIsString(k); + if (all_whitespace) { + if (is_string) + emitStringWithIndents(out, indent, env.s(k)); + else + emitLinesIndented(out, indent, env.v(k)); + } else { + if (is_string) + out << env.s(k); + else + emitCommaSeparatedList(out, env.v(k), comma_before, comma_after); + } + all_whitespace = false; + pos = new_pos; + } else { + out << c; + if (!isspace(c)) + all_whitespace = false; + indent++; + if (c == '\n') { + indent = 0; + all_whitespace = true; + } + pos++; + } + } + return out.str(); + } + + private: + using string_list = std::vector; + char charAt(size_t p) const { + if (p >= template_text.size()) + throw std::logic_error("EOS found in key"); + return template_text[p]; + } + size_t parseKey( + size_t pos, + std::ostream& k, + bool& comma_before, + bool& comma_after) const { + comma_before = false; + comma_after = false; + pos++; + if (charAt(pos) == '{') { + pos++; + if (charAt(pos) == ',') { + comma_before = true; + pos++; + } + pos = parseIdent(pos, k); + if (charAt(pos) == ',') { + comma_after = true; + pos++; + } + if (charAt(pos) != '}') + throw std::logic_error("missing terminating '}'"); + pos++; + return pos; + } else { + return parseIdent(pos, k); + } + } + size_t parseIdent(size_t pos, std::ostream& k) const { + while (pos < template_text.size() && + (isalnum(template_text[pos]) || template_text[pos] == '_')) { + k << template_text[pos]; + pos++; + } + return pos; + } + void emitCommaSeparatedList( + std::ostream& out, + const string_list& strings, + bool comma_before, + bool comma_after) const { + if (comma_before && !strings.empty()) + out << ", "; + for (const auto i : c10::irange(strings.size())) { + if (i > 0) + out << ", "; + out << strings[i]; + } + if (comma_after && !strings.empty()) + out << ", "; + } + // These indentation functions follow the convention that they never emit + // leading or trailing newlines when the input string does not have leading + // or trailing newlines. It's the responsibility of the calling function + // to indent correctly in the context. 
+ void emitIndent(std::ostream& out, size_t indent) const { + for (C10_UNUSED const auto i : c10::irange(indent)) { + out << " "; + } + } + void emitStringWithIndents( + std::ostream& out, + size_t indent, + const std::string& str) const { + for (auto c : str) { + out << c; + if (c == '\n') { + emitIndent(out, indent); + } + } + } + void emitLinesIndented( + std::stringstream& out, + size_t indent, + const string_list& strings) const { + for (const auto i : c10::irange(strings.size())) { + if (i > 0) + emitIndent(out, indent); + emitStringWithIndents(out, indent, strings[i]); + if (i + 1 != strings.size()) + out << "\n"; + } + } + std::string template_text; +}; + +static inline std::string format(const std::string& fmt, TemplateEnv& env) { + return CodeTemplate(fmt).format(env); +} + +} // namespace at::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cpp_custom_type_hack.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cpp_custom_type_hack.h new file mode 100644 index 0000000000000000000000000000000000000000..1367ef94df7385f31da9ffd09e81b9e2b209b4a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cpp_custom_type_hack.h @@ -0,0 +1,110 @@ +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP + +// YOU ARE IN THE WRONG PLACE! TURN BACK NOW! + +// This code was a temporary hack to enable embedding arbitrary C++ structures +// into Tensors. THIS IS UNSAFE AND IS NOT SUPPORTED. IF YOU USE THIS CODE, +// IT __WILL__ BREAK. + +// This code has been superseded by custom classes: +// https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html + +// Please use custom classes and **DO NOT ADD MORE CALLSITES TO THINGS DEFINED +// IN THIS FILE**. 
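+// A minimal sketch of the supported replacement (hypothetical names; assumes
+// the torch::class_ API from torch/custom_class.h):
+//   static auto reg =
+//       torch::class_<MyType>("my_namespace", "MyType").def(torch::init<>());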
+ +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP + +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at::cpp_custom_type_hack { + +template +[[deprecated( + "Use custom classes instead: " + "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] bool +isa(const Tensor& packed) { + return (packed.scalar_type() == kByte) && + (packed.storage().data_ptr().get_deleter() == + caffe2::TypeMeta::Make().deleteFn()); +} + +template +[[deprecated( + "Use custom classes instead: " + "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] T& +cast(const Tensor& packed) { + TORCH_CHECK( + packed.scalar_type() == kByte, "Expected temporary cpp type wrapper"); + TORCH_CHECK( + packed.storage().data_ptr().get_deleter() == + caffe2::TypeMeta::Make().deleteFn(), + "Expected temporary cpp type wrapper of type ", + caffe2::TypeMeta::TypeName()); + return *reinterpret_cast(packed.storage().data_ptr().get()); +} + +template +[[deprecated( + "Use custom classes instead: " + "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] Tensor +create(std::unique_ptr ptr, TensorOptions options) { + // None of this should trace, so turn off Tracer dispatching + at::AutoDispatchBelowADInplaceOrView guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + + // We store this instance away in a Tensor and register a deleter function + // so that we do not leak memory. On the other side, we pull out the storage's + // data_ptr and get the right typed pointer. 
+ void* raw_ptr = ptr.release(); + at::DataPtr at_ptr( + raw_ptr, raw_ptr, caffe2::TypeMeta::Make().deleteFn(), at::kCPU); + + // size doesn't really matter, but we can align it to the actual size + // returning variables because one likely want to use this hack from python + auto retval = at::empty({sizeof(T)}, options.device(kCPU).dtype(at::kByte)); + retval.storage().set_data_ptr_noswap(std::move(at_ptr)); + return retval; +} + +} // namespace at::cpp_custom_type_hack diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/div_rtn.h b/venv/lib/python3.10/site-packages/torch/include/ATen/div_rtn.h new file mode 100644 index 0000000000000000000000000000000000000000..4935f49ae2726389441e4012cc15bcf3981f2e84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/div_rtn.h @@ -0,0 +1,11 @@ +#pragma once + +// Integer division rounding to -Infinity +template +static inline T div_rtn(T x, T y) { + int q = x / y; + int r = x % y; + if ((r != 0) && ((r < 0) != (y < 0))) + --q; + return q; +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/dlpack.h b/venv/lib/python3.10/site-packages/torch/include/ATen/dlpack.h new file mode 100644 index 0000000000000000000000000000000000000000..9601a2478ddde2502581f5b1801557a1b57f3853 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/dlpack.h @@ -0,0 +1,232 @@ +/*! + * Copyright (c) 2017 by Contributors + * \file dlpack.h + * \brief The common header of DLPack. + */ +#ifndef DLPACK_DLPACK_H_ +#define DLPACK_DLPACK_H_ + +/** + * \brief Compatibility with C++ + */ +#ifdef __cplusplus +#define DLPACK_EXTERN_C extern "C" +#else +#define DLPACK_EXTERN_C +#endif + +/*! \brief The current version of dlpack */ +#define DLPACK_VERSION 80 + +/*! \brief The current ABI version of dlpack */ +#define DLPACK_ABI_VERSION 1 + +/*! \brief DLPACK_DLL prefix for windows */ +#ifdef _WIN32 +#ifdef DLPACK_EXPORTS +#define DLPACK_DLL __declspec(dllexport) +#else +#define DLPACK_DLL __declspec(dllimport) +#endif +#else +#define DLPACK_DLL +#endif + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif +/*! + * \brief The device type in DLDevice. + */ +#ifdef __cplusplus +typedef enum : int32_t { +#else +typedef enum { +#endif + /*! \brief CPU device */ + kDLCPU = 1, + /*! \brief CUDA GPU device */ + kDLCUDA = 2, + /*! + * \brief Pinned CUDA CPU memory by cudaMallocHost + */ + kDLCUDAHost = 3, + /*! \brief OpenCL devices. */ + kDLOpenCL = 4, + /*! \brief Vulkan buffer for next generation graphics. */ + kDLVulkan = 7, + /*! \brief Metal for Apple GPU. */ + kDLMetal = 8, + /*! \brief Verilog simulator buffer */ + kDLVPI = 9, + /*! \brief ROCm GPUs for AMD GPUs */ + kDLROCM = 10, + /*! + * \brief Pinned ROCm CPU memory allocated by hipMallocHost + */ + kDLROCMHost = 11, + /*! + * \brief Reserved extension device type, + * used for quickly test extension device + * The semantics can differ depending on the implementation. + */ + kDLExtDev = 12, + /*! + * \brief CUDA managed/unified memory allocated by cudaMallocManaged + */ + kDLCUDAManaged = 13, + /*! + * \brief Unified shared memory allocated on a oneAPI non-partititioned + * device. Call to oneAPI runtime is required to determine the device + * type, the USM allocation type and the sycl context it is bound to. + * + */ + kDLOneAPI = 14, + /*! \brief GPU support for next generation WebGPU standard. */ + kDLWebGPU = 15, + /*! \brief Qualcomm Hexagon DSP */ + kDLHexagon = 16, +} DLDeviceType; + +/*! + * \brief A Device for Tensor and operator. 
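+ * For example, {kDLCPU, 0} describes host CPU memory, while {kDLCUDA, 1}
+ * identifies the second CUDA device (fields are device_type, then device_id).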
+ */ +typedef struct { + /*! \brief The device type used in the device. */ + DLDeviceType device_type; + /*! + * \brief The device index. + * For vanilla CPU memory, pinned memory, or managed memory, this is set to 0. + */ + int32_t device_id; +} DLDevice; + +/*! + * \brief The type code options DLDataType. + */ +typedef enum { + /*! \brief signed integer */ + kDLInt = 0U, + /*! \brief unsigned integer */ + kDLUInt = 1U, + /*! \brief IEEE floating point */ + kDLFloat = 2U, + /*! + * \brief Opaque handle type, reserved for testing purposes. + * Frameworks need to agree on the handle data type for the exchange to be well-defined. + */ + kDLOpaqueHandle = 3U, + /*! \brief bfloat16 */ + kDLBfloat = 4U, + /*! + * \brief complex number + * (C/C++/Python layout: compact struct per complex number) + */ + kDLComplex = 5U, + /*! \brief boolean */ + kDLBool = 6U, +} DLDataTypeCode; + +/*! + * \brief The data type the tensor can hold. The data type is assumed to follow the + * native endian-ness. An explicit error message should be raised when attempting to + * export an array with non-native endianness + * + * Examples + * - float: type_code = 2, bits = 32, lanes = 1 + * - float4(vectorized 4 float): type_code = 2, bits = 32, lanes = 4 + * - int8: type_code = 0, bits = 8, lanes = 1 + * - std::complex: type_code = 5, bits = 64, lanes = 1 + * - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library convention, the underlying storage size of bool is 8 bits) + */ +typedef struct { + /*! + * \brief Type code of base types. + * We keep it uint8_t instead of DLDataTypeCode for minimal memory + * footprint, but the value should be one of DLDataTypeCode enum values. + * */ + uint8_t code; + /*! + * \brief Number of bits, common choices are 8, 16, 32. + */ + uint8_t bits; + /*! \brief Number of lanes in the type, used for vector types. */ + uint16_t lanes; +} DLDataType; + +/*! + * \brief Plain C Tensor object, does not manage memory. + */ +typedef struct { + /*! + * \brief The data pointer points to the allocated data. This will be CUDA + * device pointer or cl_mem handle in OpenCL. It may be opaque on some device + * types. This pointer is always aligned to 256 bytes as in CUDA. The + * `byte_offset` field should be used to point to the beginning of the data. + * + * Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow, + * TVM, perhaps others) do not adhere to this 256 byte aligment requirement + * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed + * (after which this note will be updated); at the moment it is recommended + * to not rely on the data pointer being correctly aligned. + * + * For given DLTensor, the size of memory required to store the contents of + * data is calculated as follows: + * + * \code{.c} + * static inline size_t GetDataSize(const DLTensor* t) { + * size_t size = 1; + * for (tvm_index_t i = 0; i < t->ndim; ++i) { + * size *= t->shape[i]; + * } + * size *= (t->dtype.bits * t->dtype.lanes + 7) / 8; + * return size; + * } + * \endcode + */ + void* data; + /*! \brief The device of the tensor */ + DLDevice device; + /*! \brief Number of dimensions */ + int32_t ndim; + /*! \brief The data type of the pointer*/ + DLDataType dtype; + /*! \brief The shape of the tensor */ + const int64_t* shape; + /*! + * \brief strides of the tensor (in number of elements, not bytes) + * can be NULL, indicating tensor is compact and row-majored. + */ + const int64_t* strides; + /*! 
\brief The offset in bytes to the beginning pointer to data */ + uint64_t byte_offset; +} DLTensor; + +/*! + * \brief C Tensor object, manage memory of DLTensor. This data structure is + * intended to facilitate the borrowing of DLTensor by another framework. It is + * not meant to transfer the tensor. When the borrowing framework doesn't need + * the tensor, it should call the deleter to notify the host that the resource + * is no longer needed. + */ +typedef struct DLManagedTensor { + /*! \brief DLTensor which is being memory managed */ + DLTensor dl_tensor; + /*! \brief the context of the original host framework of DLManagedTensor in + * which DLManagedTensor is used in the framework. It can also be NULL. + */ + void * manager_ctx; + /*! \brief Destructor signature void (*)(void*) - this should be called + * to destruct manager_ctx which holds the DLManagedTensor. It can be NULL + * if there is no way for the caller to provide a reasonable destructor. + * The destructors deletes the argument self as well. + */ + void (*deleter)(struct DLManagedTensor * self); +} DLManagedTensor; +#ifdef __cplusplus +} // DLPACK_EXTERN_C +#endif +#endif // DLPACK_DLPACK_H_ diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner.h new file mode 100644 index 0000000000000000000000000000000000000000..bf5932068b591e7d8367ca0fbfe3578f8becb806 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner.h @@ -0,0 +1,84 @@ +#if !defined(C10_MOBILE) && !defined(ANDROID) +#pragma once + +#include +#include + +// Forward declare DynamicLibrary +namespace at { +struct DynamicLibrary; +} + +namespace torch::inductor { +using TensorConstantMap = std::unordered_map; + +class TORCH_API AOTIModelContainerRunner { + public: + AOTIModelContainerRunner() = delete; + AOTIModelContainerRunner(const AOTIModelContainerRunner& other) = delete; + AOTIModelContainerRunner(AOTIModelContainerRunner&& other) = delete; + AOTIModelContainerRunner& operator=(const AOTIModelContainerRunner& other) = + delete; + AOTIModelContainerRunner& operator=(AOTIModelContainerRunner&& other) = + delete; + ~AOTIModelContainerRunner(); + + std::vector run( + std::vector& inputs, + AOTInductorStreamHandle cuda_stream_handle = nullptr); + + std::unordered_map getConstantNamesToOriginalFQNs() + const; + std::unordered_map getConstantNamesToDtypes() const; + void update_inactive_constant_buffer(const TensorConstantMap& const_map); + void update_constant_buffer( + const TensorConstantMap& const_map, + bool use_inactive, + bool validate_full_updates); + void run_const_fold( + bool use_inactive, + AOTInductorStreamHandle cuda_stream_handle = nullptr); + void swap_constant_buffer(); + + std::vector get_call_spec(); + + protected: + AOTIModelContainerRunner( + const std::string& model_so_path, + size_t num_models, + const std::string& device_str, + const std::string& cubin_dir); + + std::unique_ptr model_so_; + decltype(&AOTInductorModelContainerCreateWithDevice) create_func_{nullptr}; + decltype(&AOTInductorModelContainerDelete) delete_func_{nullptr}; + decltype(&AOTInductorModelContainerGetNumOutputs) get_num_outputs_func_{ + nullptr}; + decltype(&AOTInductorModelContainerRun) run_func_{nullptr}; + decltype(&AOTInductorModelContainerGetNumConstants) get_num_constants_func_{ + nullptr}; + 
decltype(&AOTInductorModelContainerGetConstantName) get_constant_name_func_{ + nullptr}; + decltype(&AOTInductorModelContainerGetConstantOriginalFQN) + get_constant_original_fqn_func_{nullptr}; + decltype(&AOTInductorModelContainerGetConstantDtype) get_constant_dtype_func_{ + nullptr}; + decltype(&AOTInductorModelContainerUpdateConstantBuffer) + update_constant_buffer_func_{nullptr}; + decltype(&AOTInductorModelContainerUpdateInactiveConstantBuffer) + update_inactive_constant_buffer_func_{nullptr}; + decltype(&AOTInductorModelContainerRunConstantFolding) run_const_fold_func_{ + nullptr}; + decltype(&AOTInductorModelContainerSwapConstantBuffer) + swap_constant_buffer_func_{nullptr}; + decltype(&AOTInductorModelContainerGetCallSpec) get_call_spec_func_{nullptr}; + + AOTInductorModelContainerHandle container_handle_ = nullptr; + + // TODO: need an OSS proxy executor implementation. For now, + // proxy_executor_handle_ will always be nullptr. + AOTIProxyExecutorHandle proxy_executor_handle_ = nullptr; +}; + +} // namespace torch::inductor +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..eed595930a8bd6fa9cb4866fb9878e17f69ef2d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h @@ -0,0 +1,19 @@ +#if !defined(C10_MOBILE) && !defined(ANDROID) +#pragma once + +#include + +namespace torch::inductor { +class TORCH_API AOTIModelContainerRunnerCpu : public AOTIModelContainerRunner { + public: + AOTIModelContainerRunnerCpu( + const std::string& model_so_path, + size_t num_models = 1); + + ~AOTIModelContainerRunnerCpu(); + + std::vector run(std::vector& inputs); +}; + +} // namespace torch::inductor +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..5db82bf413668a862c775cf0c1ae1416037837fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h @@ -0,0 +1,30 @@ +#if !defined(C10_MOBILE) && !defined(ANDROID) +#pragma once + +#include +#include + +namespace torch::inductor { + +// NOTICE: Following APIs are subject to change due to active development +// We provide NO BC guarantee for these APIs +class TORCH_API AOTIModelContainerRunnerCuda : public AOTIModelContainerRunner { + public: + // @param device_str: cuda device string, e.g. 
"cuda", "cuda:0" + AOTIModelContainerRunnerCuda( + const std::string& model_so_path, + size_t num_models = 1, + const std::string& device_str = "cuda", + const std::string& cubin_dir = ""); + + ~AOTIModelContainerRunnerCuda(); + + std::vector run(std::vector& inputs); + + std::vector run_with_cuda_stream( + std::vector& inputs, + at::cuda::CUDAStream cuda_stream); +}; + +} // namespace torch::inductor +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/pybind.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/pybind.h new file mode 100644 index 0000000000000000000000000000000000000000..3797c7e1e6a2798dd238cae06ff3db6eb990495b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runner/pybind.h @@ -0,0 +1,7 @@ +#include + +namespace torch::inductor { + +void initAOTIRunnerBindings(PyObject* module); + +} // namespace torch::inductor diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..a864dbf45bf4bdbaf593fd443bc18291d3273b09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h @@ -0,0 +1,367 @@ +#pragma once + +#include + +#include +#include +#include + +namespace torch { +namespace aot_inductor { + +// Can't use c10::ArrayRef because it's not truly header-only and +// pulls in other c10 headers. This is (sadly) copy-pasted and +// adapted. +template +class MiniArrayRef final { + public: + using iterator = T*; + using const_iterator = const T*; + using size_type = size_t; + using value_type = T; + + using reverse_iterator = std::reverse_iterator; + + private: + /// The start of the array, in an external buffer. + T* Data; + + /// The number of elements. + size_type Length; + + public: + /// @name Constructors + /// @{ + + /// Construct an empty MiniArrayRef. + /* implicit */ constexpr MiniArrayRef() : Data(nullptr), Length(0) {} + + /// Construct an MiniArrayRef from a single element. + // TODO Make this explicit + constexpr MiniArrayRef(const T& OneElt) : Data(&OneElt), Length(1) {} + + /// Construct an MiniArrayRef from a pointer and length. + constexpr MiniArrayRef(T* data, size_t length) : Data(data), Length(length) {} + + /// Construct an MiniArrayRef from a range. + constexpr MiniArrayRef(T* begin, T* end) : Data(begin), Length(end - begin) {} + + template < + typename Container, + typename = std::enable_if_t().data())>, + T*>::value>> + /* implicit */ MiniArrayRef(Container& container) + : Data(container.data()), Length(container.size()) {} + + /// Construct an MiniArrayRef from a std::vector. + // The enable_if stuff here makes sure that this isn't used for + // std::vector, because MiniArrayRef can't work on a std::vector + // bitfield. + template + /* implicit */ MiniArrayRef(const std::vector& Vec) + : Data(Vec.data()), Length(Vec.size()) { + static_assert( + !std::is_same::value, + "MiniArrayRef cannot be constructed from a std::vector bitfield."); + } + + /// Construct an MiniArrayRef from a std::array + template + /* implicit */ constexpr MiniArrayRef(std::array& Arr) + : Data(Arr.data()), Length(N) {} + + /// Construct an MiniArrayRef from a C array. 
+ template + /* implicit */ constexpr MiniArrayRef(T (&Arr)[N]) : Data(Arr), Length(N) {} + + /// Construct an MiniArrayRef from a std::initializer_list. + /* implicit */ constexpr MiniArrayRef(const std::initializer_list& Vec) + : Data( + std::begin(Vec) == std::end(Vec) ? static_cast(nullptr) + : std::begin(Vec)), + Length(Vec.size()) {} + + /// @} + /// @name Simple Operations + /// @{ + + constexpr iterator begin() const { + return Data; + } + constexpr iterator end() const { + return Data + Length; + } + + // These are actually the same as iterator, since MiniArrayRef only + // gives you const iterators. + constexpr const_iterator cbegin() const { + return Data; + } + constexpr const_iterator cend() const { + return Data + Length; + } + + constexpr reverse_iterator rbegin() const { + return reverse_iterator(end()); + } + constexpr reverse_iterator rend() const { + return reverse_iterator(begin()); + } + + /// empty - Check if the array is empty. + constexpr bool empty() const { + return Length == 0; + } + + constexpr T* data() const { + return Data; + } + + /// size - Get the array size. + constexpr size_t size() const { + return Length; + } + + /// equals - Check for element-wise equality. + constexpr bool equals(MiniArrayRef RHS) const { + return Length == RHS.Length && std::equal(begin(), end(), RHS.begin()); + } + + /// @} + /// @name Operator Overloads + /// @{ + constexpr const T& operator[](size_t Index) const { + return Data[Index]; + } + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + typename std::enable_if::value, MiniArrayRef>::type& + operator=(U&& Temporary) = delete; + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + typename std::enable_if::value, MiniArrayRef>::type& + operator=(std::initializer_list) = delete; +}; + +using MiniIntArrayRef = MiniArrayRef; + +inline bool is_contiguous_strides_for_shape( + int64_t ndim, + const int64_t* strides_ptr, + const int64_t* sizes_ptr) { + int64_t z = 1; + for (int64_t d = ndim - 1; d >= 0; d--) { + const auto& size_d = sizes_ptr[d]; + if (size_d != 1) { + if (strides_ptr[d] == z) { + z *= size_d; + } else { + return false; + } + } + } + return true; +} + +// Shim for AOTI generated code to pretend a raw array works like an +// AtenTensorHandle. 
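A minimal usage sketch (not part of the original header) for the is_contiguous_strides_for_shape helper defined above, placed here before the ArrayRefTensor shim. The sizes and strides are invented for illustration, and the include path is taken from this diff:

#include <cassert>
#include <cstdint>
#include <torch/csrc/inductor/aoti_runtime/arrayref_tensor.h>

int main() {
  using torch::aot_inductor::is_contiguous_strides_for_shape;

  const int64_t sizes[2] = {2, 3};
  const int64_t row_major[2] = {3, 1}; // compact row-major: each stride is the product of the trailing sizes
  const int64_t col_major[2] = {1, 2}; // column-major, so not compact row-major

  assert(is_contiguous_strides_for_shape(2, row_major, sizes));
  assert(!is_contiguous_strides_for_shape(2, col_major, sizes));
  return 0;
}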
+template +class ArrayRefTensor { + public: + ArrayRefTensor() = default; + + explicit ArrayRefTensor( + MiniArrayRef arr, + MiniArrayRef sizes, + MiniArrayRef strides, + int32_t device_type, + int32_t device_idx) + : arrayRef_(arr), + sizes_(sizes), + strides_(strides), + device_type_(device_type), + device_idx_(device_idx), + numel_(arr.size()) { + assert(sizes.size() == strides.size()); + assert(is_contiguous_strides_for_shape( + sizes.size(), strides.data(), sizes.data())); + } + + AtenTensorHandle expensiveCopyToTensor() const { + AtenTensorHandle result; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_empty_strided( + sizes_.size(), + sizes_.data(), + strides_.data(), + aoti_torch_dtype>(), + device_type_, + device_idx_, + &result)); + void* dataPtr; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(result, &dataPtr)); + std::memcpy(dataPtr, data(), numel() * sizeof(T)); + return result; + } + + // We need to look the same as RAIIAtenTensorHandle, which returns + // an owning AtenTensorHandle from release(). So, we allocate one! + AtenTensorHandle release() { + return expensiveCopyToTensor(); + } + + // We don't need to free any memory. + void reset() {} + + auto sizes() const { + return sizes_; + } + + auto strides() const { + return strides_; + } + + auto device_type() const { + return device_type_; + } + + auto device_idx() const { + return device_idx_; + } + + T* data() const { + return arrayRef_.data(); + } + + auto numel() const { + return numel_; + } + + void set_arrayref(MiniArrayRef new_arrayref) { + arrayRef_ = new_arrayref; + } + + private: + MiniArrayRef arrayRef_; + // We expect generated code to have statically available sizes & + // strides for us. + MiniArrayRef sizes_; + MiniArrayRef strides_; + int32_t device_type_ = 0; + int32_t device_idx_ = 0; + int32_t numel_ = 0; +}; + +inline AtenTensorHandle reinterpret_tensor_wrapper( + AtenTensorHandle self, + int64_t ndim, + const int64_t* sizes_ptr, + const int64_t* strides_ptr, + int64_t storage_offset) { + AtenTensorHandle result; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch__reinterpret_tensor( + self, ndim, sizes_ptr, strides_ptr, storage_offset, &result)); + return result; +} + +template +inline ArrayRefTensor reinterpret_tensor_wrapper( + const ArrayRefTensor& self, + int64_t ndim, + const int64_t* sizes_ptr, + const int64_t* strides_ptr, + int64_t storage_offset) { + // REVIEW: we should add a way to build the DSO in debug mode during + // tests so we can have checks like this! 
+ assert(is_contiguous_strides_for_shape(ndim, strides_ptr, sizes_ptr)); + return ArrayRefTensor( + MiniArrayRef( + self.data() + storage_offset, self.numel() - storage_offset), + MiniArrayRef(sizes_ptr, ndim), + MiniArrayRef(strides_ptr, ndim), + self.device_type(), + self.device_idx()); +} + +inline void* get_data_ptr_wrapper(AtenTensorHandle tensor) { + void* result; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(tensor, &result)); + return result; +} + +template +inline T* get_data_ptr_wrapper(ArrayRefTensor& tensor) { + return tensor.data(); +} + +template +inline T* get_data_ptr_wrapper(const MiniArrayRef& arr) { + return arr.data(); +} + +inline AtenTensorHandle unwrap_raii_handle_if_needed( + const RAIIAtenTensorHandle& handle) { + return handle.get(); +} + +template +inline const ArrayRefTensor& unwrap_raii_handle_if_needed( + const ArrayRefTensor& tensor) { + return tensor; +} + +template +inline ArrayRefTensor& unwrap_raii_handle_if_needed( + ArrayRefTensor& tensor) { + return tensor; +} + +inline RAIIAtenTensorHandle wrap_with_raii_handle_if_needed( + AtenTensorHandle handle) { + return RAIIAtenTensorHandle(handle); +} + +template +inline const ArrayRefTensor& wrap_with_raii_handle_if_needed( + const ArrayRefTensor& tensor) { + return tensor; +} + +template +inline ArrayRefTensor& wrap_with_raii_handle_if_needed( + ArrayRefTensor& tensor) { + return tensor; +} + +template +inline RAIIAtenTensorHandle expensive_copy_to_tensor_if_needed( + const ArrayRefTensor& tensor) { + return tensor.expensiveCopyToTensor(); +} + +inline AtenTensorHandle expensive_copy_to_tensor_if_needed( + AtenTensorHandle handle) { + return handle; +} + +template +const T& convert_arrayref_tensor_to_tensor(const T& t) { + return t; +} + +template +RAIIAtenTensorHandle convert_arrayref_tensor_to_tensor( + const ArrayRefTensor& art) { + return art.expensiveCopyToTensor(); +} + +} // namespace aot_inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/device_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/device_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..08a5db3a9e9c6d6da6035fd36e0c0b40e206337d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/device_utils.h @@ -0,0 +1,51 @@ +#pragma once + +// WARNING: Be careful when adding new includes here. This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. + +#ifdef USE_CUDA + +// FIXME: Currently, CPU and CUDA backend are mutually exclusive. +// This is a temporary workaround. We need a better way to support +// multi devices. 
+ +#include +#include + +#define AOTI_RUNTIME_DEVICE_CHECK(EXPR) \ + do { \ + const cudaError_t code = EXPR; \ + const char* msg = cudaGetErrorString(code); \ + if (code != cudaSuccess) { \ + throw std::runtime_error( \ + std::string("CUDA error: ") + std::string(msg)); \ + } \ + } while (0) + +namespace torch { +namespace aot_inductor { + +using DeviceStreamType = cudaStream_t; + +} // namespace aot_inductor +} // namespace torch + +#else // !USE_CUDA + +#define AOTI_RUNTIME_DEVICE_CHECK(EXPR) \ + bool ok = EXPR; \ + if (!ok) { \ + throw std::runtime_error("CPU runtime error"); \ + } + +namespace torch { +namespace aot_inductor { + +using DeviceStreamType = void*; + +} // namespace aot_inductor +} // namespace torch + +#endif // USE_CUDA diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/interface.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/interface.h new file mode 100644 index 0000000000000000000000000000000000000000..cf30c3742d523a18fce73bbdde40be55a7608190 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/interface.h @@ -0,0 +1,182 @@ +#pragma once + +// WARNING: Be careful when adding new includes here. This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. +#include + +extern "C" { +struct AOTInductorModelOpaque; +using AOTInductorModelHandle = AOTInductorModelOpaque*; + +struct AOTInductorModelContainerOpaque; +using AOTInductorModelContainerHandle = AOTInductorModelContainerOpaque*; + +struct AOTInductorStreamOpaque; +using AOTInductorStreamHandle = AOTInductorStreamOpaque*; + +struct AOTInductorConstantMap; +using AOTInductorConstantMapHandle = AOTInductorConstantMap*; + +// TODO: Deprecate this API. This was kept for BC compatibility. +// Please use AOTInductorModelContainerCreateWithDevice instead. +AOTIRuntimeError AOTInductorModelContainerCreate( + AOTInductorModelContainerHandle* container_handle, + size_t num_models, + bool is_cpu, + const char* cubin_dir); + +// Creates an AOTInductor model container. The parameter num_models +// specifies the number of model instances that may be run concurrently for +// the same input model. +// `device_str` MUST NOT be nullptr. It must be a valid device string, e.g. +// "cpu", "cuda", "cuda:0", etc. If the device index is not specified for CUDA +// device, runtime will use the device index returned by +// "cudaGetDevice(&device_idx)" +AOTIRuntimeError AOTInductorModelContainerCreateWithDevice( + AOTInductorModelContainerHandle* container_handle, + size_t num_models, + const char* device_str, + const char* cubin_dir); + +// Deletes the AOTInductor model container. +AOTIRuntimeError AOTInductorModelContainerDelete( + AOTInductorModelContainerHandle container_handle); + +// Runs the inference. 
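A minimal sketch (not part of this header) of an end-to-end call sequence for the container C API declared in this file. Creating real input tensors goes through the aoti_torch_* shim (see shim.h later in this diff), so make_input_handle() below is a hypothetical placeholder for that step, and passing nullptr for cubin_dir is an assumption:

#include <vector>
#include <torch/csrc/inductor/aoti_runtime/interface.h>

AtenTensorHandle make_input_handle();  // hypothetical input factory

void run_once() {
  AOTInductorModelContainerHandle container = nullptr;
  AOTIRuntimeError err = AOTInductorModelContainerCreateWithDevice(
      &container, /*num_models=*/1, /*device_str=*/"cuda", /*cubin_dir=*/nullptr);
  if (err != 0) {  // 0 is AOTI_RUNTIME_SUCCESS
    return;
  }

  size_t num_inputs = 0;
  size_t num_outputs = 0;
  AOTInductorModelContainerGetNumInputs(container, &num_inputs);
  AOTInductorModelContainerGetNumOutputs(container, &num_outputs);

  std::vector<AtenTensorHandle> inputs(num_inputs);
  for (auto& h : inputs) {
    h = make_input_handle();  // ownership of each input handle is stolen by Run
  }
  std::vector<AtenTensorHandle> outputs(num_outputs, nullptr);  // written by Run; caller owns them

  AOTInductorModelContainerRun(
      container,
      inputs.data(), num_inputs,
      outputs.data(), num_outputs,
      /*stream_handle=*/nullptr,
      /*proxy_executor_handle=*/nullptr);

  // ... consume outputs, then free each one via the shim's delete-tensor call ...
  AOTInductorModelContainerDelete(container);
}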
+AOTIRuntimeError AOTInductorModelContainerRun( + AOTInductorModelContainerHandle container_handle, + AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + size_t num_inputs, + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + size_t num_outputs, + AOTInductorStreamHandle stream_handle, + AOTIProxyExecutorHandle proxy_executor_handle); + +// Retrieves the number of constants for the model. +AOTIRuntimeError AOTInductorModelContainerGetNumConstants( + AOTInductorModelContainerHandle container_handle, + size_t* num_constants); + +// Retrieves a constant's name. +// idx is the index of the internal's constants. +// Need idx < num_constants from AOTInductorModelContainerGetNumConstants +AOTIRuntimeError AOTInductorModelContainerGetConstantName( + AOTInductorModelContainerHandle container_handle, + size_t idx, + const char** name); + +// Retrieves a constant's original FQN. +// idx is the index of the internal's constants. +// Need idx < num_constants from AOTInductorModelContainerGetNumConstants +AOTIRuntimeError AOTInductorModelContainerGetConstantOriginalFQN( + AOTInductorModelContainerHandle container_handle, + size_t idx, + const char** original_fqn); + +// Retrieves whether a constant is from folded. +// idx is the index of the internal's constants. +// Need idx < num_constants from AOTInductorModelContainerGetNumConstants +AOTIRuntimeError AOTInductorModelContainerGetConstantFromFolded( + AOTInductorModelContainerHandle container_handle, + size_t idx, + bool* from_folded); + +// Retrieves a constant's dtype. +// idx is the index of the internal's constants. +// Need idx < num_constants from AOTInductorModelContainerGetNumConstants +AOTIRuntimeError AOTInductorModelContainerGetConstantDtype( + AOTInductorModelContainerHandle container_handle, + size_t idx, + int32_t* dtype); + +// Setup the constant buffer in model container with provided ConstantMap +// use_inactive should be set as true if the inactive buffer is to be updated. +// validate_full_update checks if all constants are included in the ConstantMap +AOTIRuntimeError AOTInductorModelContainerUpdateConstantBuffer( + AOTInductorModelContainerHandle container_handle, + AOTInductorConstantMapHandle constant_map_handle, + bool use_inactive, + bool validate_full_update); + +// Setup the inactive constant buffer in model container with provided +// ConstantMap +AOTIRuntimeError AOTInductorModelContainerUpdateInactiveConstantBuffer( + AOTInductorModelContainerHandle container_handle, + AOTInductorConstantMapHandle constant_map_handle); + +// Run constant folding on constant buffer. +AOTIRuntimeError AOTInductorModelContainerRunConstantFolding( + AOTInductorModelContainerHandle container_handle, + bool use_inactive, + AOTInductorStreamHandle stream_handle, + AOTIProxyExecutorHandle proxy_executor_handle); + +// Swap the constant buffer being used to the inactive one. +AOTIRuntimeError AOTInductorModelContainerSwapConstantBuffer( + AOTInductorModelContainerHandle container_handle); + +// Retrieves the number of inputs for the model. +AOTIRuntimeError AOTInductorModelContainerGetNumInputs( + AOTInductorModelContainerHandle container_handle, + size_t* ret_num_inputs); + +// Retrieves the input name at the given index. 
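A short sketch (not part of this header) of the constant-introspection getters declared above, listing each constant's name and dtype code. It assumes `container` is a handle obtained from one of the create functions:

#include <cstddef>
#include <cstdio>
#include <torch/csrc/inductor/aoti_runtime/interface.h>

void dump_constants(AOTInductorModelContainerHandle container) {
  size_t num_constants = 0;
  AOTInductorModelContainerGetNumConstants(container, &num_constants);
  for (size_t idx = 0; idx < num_constants; idx++) {
    const char* name = nullptr;
    int32_t dtype = 0;
    AOTInductorModelContainerGetConstantName(container, idx, &name);
    AOTInductorModelContainerGetConstantDtype(container, idx, &dtype);
    std::printf("constant %zu: %s (dtype code %d)\n", idx, name, static_cast<int>(dtype));
  }
}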
+AOTIRuntimeError AOTInductorModelContainerGetInputName( + AOTInductorModelContainerHandle container_handle, + size_t input_idx, + const char** ret_input_names); + +// Retrieves the number of outputs for the model. +AOTIRuntimeError AOTInductorModelContainerGetNumOutputs( + AOTInductorModelContainerHandle container_handle, + size_t* ret_num_outputs); + +// Retrieves the output name at the given index. +AOTIRuntimeError AOTInductorModelContainerGetOutputName( + AOTInductorModelContainerHandle container_handle, + size_t output_idx, + const char** ret_output_names); + +// Creates an AOTInductorModel instance. This is a thin and light wrapper +// around the compiled model; it doesn't handle concurrency, queueing, device +// management, etc. Use this if bare-metal performance is needed and you are +// willing to handle other "management" aspects yourself. +// +// constant_map_handle is an opaque type to satisfy the C ABI. It should be a +// std::unordered_map*. +AOTIRuntimeError AOTInductorModelCreate( + AOTInductorModelHandle* model_handle, + AOTInductorConstantMapHandle constant_map_handle); + +// Run an AOTInductorModel (see AOTInductorModelCreate for when one should use +// this function versus AOTInductorModelContainerRun). +AOTIRuntimeError AOTInductorModelRun( + AOTInductorModelHandle model_handle, + AtenTensorHandle* input_handles, + AtenTensorHandle* output_handles); + +// Replace AOTInductorModel's constant map. Note it doesn't handle concurrency +// so be sure to handle ordering if AOTInductorModelRun is ran concurrently. +AOTIRuntimeError AOTInductorModelUpdateConstantsMap( + AOTInductorModelHandle model_handle, + AOTInductorConstantMapHandle constant_map_handle); + +// Delete an AOTInductorModel created by AOTInductorModelCreate. +AOTIRuntimeError AOTInductorModelDelete(AOTInductorModelHandle model_handle); + +AOTIRuntimeError AOTInductorModelGetNumOutputs( + AOTInductorModelHandle model_handle, + size_t* ret_num_outputs); + +AOTIRuntimeError AOTInductorModelContainerGetCallSpec( + AOTInductorModelContainerHandle container_handle, + const char** in_spec, + const char** out_spec); + +} // extern "C" diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h new file mode 100644 index 0000000000000000000000000000000000000000..a749957e7de3b9a05b1b71e32e68e921feb9a1db --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h @@ -0,0 +1,522 @@ +#pragma once + +#include +#include +#include + +// WARNING: Be careful when adding new includes here. This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. +#include +#include + +#define AOTI_RUNTIME_CHECK(EXPR, MSG) \ + do { \ + bool ok = EXPR; \ + if (!ok) { \ + throw std::runtime_error(MSG); \ + } \ + } while (0) + +// At codegen time, we write out a binary file called constants.bin. +// We then turn the raw binary to an object file that exposes this +// symbol and link it into the final .so. 
+// For information on the binary format, see `man objcopy`, under +// the "binary-architecture" flag: +// https://man7.org/linux/man-pages/man1/objcopy.1.html +// todo: use #embed in C++ 23 once available +extern const uint8_t _binary_constants_bin_start[]; +extern const uint8_t _binary_constants_bin_end[]; + +#define AOTI_CONST_GPU_ALIGNMENT 64 + +namespace { + +#ifdef USE_CUDA + +using CUDAPtr = std::unique_ptr>; + +CUDAPtr RAII_cudaMalloc(size_t num_bytes) { + void* data_ptr; + AOTI_RUNTIME_DEVICE_CHECK(cudaMalloc((void**)&data_ptr, num_bytes)); + auto deleter = [](void* ptr) { AOTI_RUNTIME_DEVICE_CHECK(cudaFree(ptr)); }; + return CUDAPtr(data_ptr, deleter); +} + +#endif // USE_CUDA + +} // anonymous namespace + +namespace torch { +namespace aot_inductor { +using ConstantMap = std::unordered_map; + +// valid device strs are: cpu, cuda, cuda:0, cuda:1, ... +// Update the list here if more devices are supported in the future +inline void parse_device_str( + const std::string& device_str, + int32_t& device_type, + int32_t& device_idx) { + std::regex re("(cpu|cuda)(:([0-9]+))?"); + std::smatch sm; + bool matched = std::regex_match(device_str, sm, re); + AOTI_RUNTIME_CHECK(matched, "Invalid device: " + device_str); + + if (sm[1].str() == "cpu") { + device_type = aoti_torch_device_type_cpu(); + } else if (sm[1].str() == "cuda") { + device_type = aoti_torch_device_type_cuda(); + } else { + AOTI_RUNTIME_CHECK(false, "Invalid device: " + device_str); + } + + if (sm[3].matched) { + device_idx = stoi(sm[3].str()); + } else { + device_idx = -1; + } +} + +// Defines the base class for AOTInductorModel, which is generated by the +// AOTInductor cpp codegen. Since we do not need dynamic dispatch, we rely +// on curiously recurring template pattern (CRTP) to save some runtime +// v-table overhead. The generated AOTInductorModel is specialized with +// methods such as run_impl. 
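The CRTP dispatch mentioned above can be illustrated with a toy, self-contained example; the names Base, MyModel, and run_impl are placeholders, not AOTInductor symbols. The base class casts `this` to the derived type and calls its method directly, so no virtual table is involved:

#include <iostream>

// Toy CRTP example: Base::run() dispatches to the derived class's run_impl()
// at compile time.
template <typename Model>
class Base {
 public:
  void run() {
    static_cast<Model*>(this)->run_impl();
  }
};

class MyModel : public Base<MyModel> {
 public:
  void run_impl() {
    std::cout << "MyModel::run_impl\n";
  }
};

int main() {
  MyModel m;
  m.run();  // prints "MyModel::run_impl"
  return 0;
}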
+template +class AOTInductorModelBase { + public: + AOTInductorModelBase( + size_t num_inputs, + size_t num_outputs, + size_t num_constants, + const std::string& device_str, + std::optional cubin_dir) + : inputs_info_(num_inputs), + outputs_info_(num_outputs), + constants_info_(num_constants), + cubin_dir_(cubin_dir) { + parse_device_str(device_str, device_type_, device_idx_); + +#ifdef USE_CUDA + if (device_idx_ == -1) { + AOTI_RUNTIME_DEVICE_CHECK(cudaGetDevice(&device_idx_)); + } +#endif // USE_CUDA + } + + ~AOTInductorModelBase() { +#ifdef USE_CUDA + if (run_finished_) { + auto code = cudaEventDestroy(*run_finished_); + if (code != cudaSuccess) { + std::cerr << "Failed to destroy CUDA event in AOTInductor model: " + << cudaGetErrorString(code) << std::endl; + } + } +#endif // USE_CUDA + } + + AOTInductorModelBase(AOTInductorModelBase&&) = delete; + AOTInductorModelBase& operator=(AOTInductorModelBase&&) = delete; + AOTInductorModelBase(const AOTInductorModelBase&) = delete; + AOTInductorModelBase& operator=(const AOTInductorModelBase&) = delete; + + void run( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor) { +#ifdef USE_CUDA + if (!run_finished_) { + cudaEvent_t run_finished; + AOTI_RUNTIME_DEVICE_CHECK(cudaEventCreate(&run_finished)); + run_finished_.emplace(run_finished); + } + + auto* model = static_cast(this); + model->run_impl(input_handles, output_handles, stream, proxy_executor); + AOTI_RUNTIME_DEVICE_CHECK(cudaEventRecord(*run_finished_, stream)); +#else // !USE_CUDA + run_finished_ = false; + auto* model = static_cast(this); + model->run_impl(input_handles, output_handles, stream, proxy_executor); + run_finished_ = true; +#endif // USE_CUDA + } + + std::unordered_map run_const_fold( + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor, + bool initialization = false) { +#ifdef USE_CUDA + if (!run_finished_) { + cudaEvent_t run_finished; + AOTI_RUNTIME_DEVICE_CHECK(cudaEventCreate(&run_finished)); + run_finished_.emplace(run_finished); + } + + auto* model = static_cast(this); + auto folded_constants = + model->const_run_impl(stream, proxy_executor, initialization); + AOTI_RUNTIME_DEVICE_CHECK(cudaEventRecord(*run_finished_, stream)); + return folded_constants; +#else // !USE_CUDA + return {}; +#endif // USE_CUDA + } + + void load_constants() { + size_t num_constants = this->num_constants(); + constants_map_->reserve(num_constants); + + std::vector constants_internal_offset(num_constants); + if (device_type_ != aoti_torch_device_type_cpu()) { + size_t blob_size = 0; + compute_cuda_constant_blob(blob_size, constants_internal_offset); +#ifdef USE_CUDA + constant_blob_ = RAII_cudaMalloc(blob_size); +#endif + } + + size_t bytes_read = 0; + for (size_t i = 0; i < num_constants; i++) { + std::string name = this->constant_name(i); + size_t data_size = this->constant_data_size(i); + bool from_folded = this->constant_from_folded(i); + uint8_t* internal_ptr = (data_size != 0) + ? constant_ptr( + constants_internal_offset[i], + bytes_read, + data_size, + from_folded) + : nullptr; + bytes_read += data_size; + + // Create at::Tensor from copied memory. 
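      // (Illustrative note, not in the original source: internal_ptr already
      // points at populated storage -- the CUDA blob filled by constant_ptr()
      // above, or on CPU the embedded _binary_constants_bin_start image -- so
      // the create_tensor_from_blob call below wraps that memory rather than
      // copying it again.)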
+ auto dtype = this->constant_dtype(i); + auto ndim = this->constant_ndim(i); + auto size = this->constant_shape(i); + auto stride = this->constant_stride(i); + auto offset = this->constant_offset(i); + + AtenTensorHandle tensor_handle; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob( + internal_ptr, + ndim, + size, + stride, + offset, + dtype, + device_type_, + device_idx_, + &tensor_handle)); + constants_map_->emplace(std::move(name), tensor_handle); + } + if (constants_map_) { + this->update_constants_array_from_map(); + } + } + +#ifdef USE_CUDA + CUDAPtr&& release_constant_blob() { + return std::move(constant_blob_); + } +#endif + + std::shared_ptr> get_constants_array() { + return constants_; + } + + const int32_t get_device_idx() const { + return device_idx_; + } + + uint8_t* constant_ptr( + size_t constant_offset, + size_t bytes_read, + size_t data_size, + bool skip_copy) { +#ifdef USE_CUDA + auto* constants_ptr = static_cast(constant_blob_.get()); + uint8_t* internal_ptr = constants_ptr + constant_offset; + // Copy data to GPU memory + // TODO: Handle shared storage case. + if (!skip_copy) { + AOTI_RUNTIME_DEVICE_CHECK(cudaMemcpy( + internal_ptr, + _binary_constants_bin_start + bytes_read, + data_size, + cudaMemcpyHostToDevice)); + } + return internal_ptr; +#else // !USE_CUDA + // get pointer to constant which is packed in model during compile time. + AOTI_RUNTIME_CHECK(!skip_copy, "pure cpu mode doesn't support skip copy"); + return const_cast(_binary_constants_bin_start) + bytes_read; +#endif // USE_CUDA + } + + void compute_cuda_constant_blob( + size_t& blob_size, + std::vector& constants_internal_offset) { +#ifdef USE_CUDA + size_t num_constants = this->num_constants(); + // Compute required blob size with 64-alignment if on GPU. 
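    // (Illustrative note, not in the original source: the loop below rounds
    // each constant's size up to the next multiple of AOTI_CONST_GPU_ALIGNMENT
    // (64 bytes). For example, data_size = 100 becomes
    // 64 + (100 / 64) * 64 = 128, while data_size = 128 is already a multiple
    // of 64 and is left unchanged.)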
+ blob_size = 0; + for (size_t i = 0; i < num_constants; i++) { + size_t data_size = this->constant_data_size(i); + if (data_size % AOTI_CONST_GPU_ALIGNMENT) { + data_size = AOTI_CONST_GPU_ALIGNMENT + + (data_size / AOTI_CONST_GPU_ALIGNMENT) * AOTI_CONST_GPU_ALIGNMENT; + } + constants_internal_offset[i] = blob_size; + blob_size += data_size; + } +#endif // USE_CUDA + } + + size_t num_inputs() const { + return inputs_info_.size(); + } + + size_t num_outputs() const { + return outputs_info_.size(); + } + + size_t num_constants() const { + return constants_info_.size(); + } + + const char* input_name(int64_t idx) const { + return inputs_info_.at(idx).name; + } + + const char* output_name(int64_t idx) const { + return outputs_info_.at(idx).name; + } + + const char* constant_name(int64_t idx) const { + return constants_info_.at(idx).name; + } + + size_t constant_ndim(int64_t idx) { + return constants_info_.at(idx).shape.size(); + } + + const int64_t* constant_shape(int64_t idx) const { + return constants_info_.at(idx).shape.data(); + } + + const int64_t* constant_stride(int64_t idx) const { + return constants_info_.at(idx).stride.data(); + } + + int32_t constant_dtype(int64_t idx) const { + return constants_info_.at(idx).dtype; + } + + size_t constant_offset(int64_t idx) const { + return constants_info_.at(idx).offset; + } + + size_t constant_data_size(int64_t idx) const { + return constants_info_.at(idx).data_size; + } + + const char* constant_original_fqn(int64_t idx) const { + return constants_info_.at(idx).original_fqn; + } + + bool constant_from_folded(int64_t idx) const { + return constants_info_.at(idx).from_folded; + } + + const char* get_in_spec() const { + return in_spec_.c_str(); + } + + const char* get_out_spec() const { + return out_spec_.c_str(); + } + + void update_constants_array_from_map() { + if (!constants_map_) { + throw std::runtime_error{ + "constants_map_ was not ready when constants_ is trying to be constructed from it!"}; + } + if (!constants_) { + constants_ = + std::make_shared>(constants_info_.size()); + } else { + constants_->resize(constants_info_.size()); + } + int idx = 0; + for (const auto& info : constants_info_) { + const auto it = constants_map_->find(info.name); + if (it != constants_map_->end()) { + constants_->at(idx) = ConstantHandle(it->second); + } + idx++; + } + } + + void update_constants_map( + std::shared_ptr constants_map, + bool remap_constants_array = true) { + constants_map_ = std::move(constants_map); + if (remap_constants_array) { + update_constants_array_from_map(); + } + } + + // This function allows us to update the constants_ that is used to look up + // the corresponding constant tensor during runtime. + void update_constants_array( + std::shared_ptr> constants_array) { + constants_ = std::move(constants_array); + } + + /// Returns true if the model is complete. + bool is_finished() { +#ifdef USE_CUDA + if (!run_finished_) { + throw std::runtime_error{"Model CUDA event was not initialized"}; + } + + auto event_status = cudaEventQuery(*run_finished_); + if (event_status == cudaSuccess) { + return true; + } else if (event_status == cudaErrorNotReady) { + return false; + } + + throw std::runtime_error( + std::string("The model did not finish successfully. Error: ") + + cudaGetErrorString(cudaGetLastError())); +#else // !USE_CUDA + return run_finished_; +#endif // USE_CUDA + } + + /// Synchronizes completion event. 
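  /// (Illustrative note, not in the original source: on CUDA builds this
  /// blocks the host on the run_finished_ event recorded at the end of run();
  /// on CPU builds the body below compiles to a no-op.)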
+ void wait_for_completion() { +#ifdef USE_CUDA + if (!run_finished_) { + throw std::runtime_error{"Model event was not initialized"}; + } + + AOTI_RUNTIME_DEVICE_CHECK(cudaEventSynchronize(*run_finished_)); +#endif // USE_CUDA + } + + protected: + struct ParamInfo { + const char* name = nullptr; + }; + + struct ConstInfo { + const char* name = nullptr; + std::vector shape; + std::vector stride; + int32_t dtype; + int64_t offset; + size_t data_size; + const char* original_fqn = nullptr; + bool from_folded; + }; + + std::vector inputs_info_; + std::vector outputs_info_; + std::vector constants_info_; + std::string in_spec_; + std::string out_spec_; + + std::shared_ptr constants_map_; + std::shared_ptr> constants_; + +#ifdef USE_CUDA + // Holds the blob storage for constants' at::Tensor for CUDA. + CUDAPtr constant_blob_; +#endif // USE_CUDA + + // A directory with CUDA binary files, e.g. compiled kernels, etc. + const std::optional cubin_dir_; + + // Record if the model finishes an inference run so that its owning + // AOTModelContainer can re-use this instance. +#ifdef USE_CUDA + std::optional run_finished_; +#else // !USE_CUDA + bool run_finished_; +#endif + + // Generated model uses this device index to create CUDA guards. + int32_t device_type_; + int32_t device_idx_; +}; + +// Codegen-ed classes can derive from this to keep pointers to loaded kernels. +class AOTInductorModelKernelsBase { + public: + virtual ~AOTInductorModelKernelsBase() = default; +}; + +class AOTInductorModel : public AOTInductorModelBase { + public: + AOTInductorModel( + std::shared_ptr constants_map, + std::shared_ptr> constants_array, + const std::string& device_str, + std::optional cubin_dir); + + std::unordered_map const_run_impl( + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor, + bool initialization = false); + + void _const_run_impl( + std::vector& output_handles, + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor); + + void run_impl( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor); + + template + Outputs run_impl_minimal_arrayref_interface( + const Inputs& inputs, + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor); + + static std::unique_ptr Create( + std::shared_ptr constants_map, + std::shared_ptr> constants_array, + const std::string& device_str, + std::optional cubin_dir) { + return std::make_unique( + std::move(constants_map), + std::move(constants_array), + device_str, + cubin_dir); + } + + private: + std::unique_ptr kernels_; +}; + +} // namespace aot_inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model_container.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model_container.h new file mode 100644 index 0000000000000000000000000000000000000000..7196e10910fb8c96773e3d0b646b1a77c00370ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model_container.h @@ -0,0 +1,510 @@ +#pragma once + +#include +#include +#include +#include +#include + +// WARNING: Be careful when adding new includes here. 
This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. +#include + +namespace torch { +namespace aot_inductor { + +class AOTInductorModelContainer { + public: + AOTInductorModelContainer( + size_t num_models, + const std::string& device_str, + std::optional cubin_dir = std::nullopt) { + constants_map_ = std::make_shared(); + constants_array_ = std::make_shared>(); + use_secondary_ = false; + constant_folded_ = false; + models_.reserve(num_models); + available_models_.reserve(num_models); + for (size_t i = 0; i < num_models; ++i) { + models_.push_back(AOTInductorModel::Create( + constants_map_, constants_array_, device_str, cubin_dir)); + available_models_.push_back(models_.back().get()); + } + + // Note that the all following fields (input_names_, output_names, + // etc) can be filled in by the AOT + // codegen. However, we choose to query such information from + // the owned AOTInductorModel for a couple of reasons: + // * simplify the codegen templates + // * reduce information fragmentation and duplication + // * the initialization process below is done only once when the container + // is constructed, so it would have little performance impact + auto* model = available_models_[0]; + size_t num_inputs = model->num_inputs(); + input_names_.reserve(num_inputs); + for (size_t i = 0; i < num_inputs; i++) { + input_names_.push_back(model->input_name(i)); + } + + size_t num_outputs = model->num_outputs(); + output_names_.reserve(num_outputs); + for (size_t i = 0; i < num_outputs; i++) { + output_names_.push_back(model->output_name(i)); + } + + model->load_constants(); +#ifdef USE_CUDA + constant_blob_ = model->release_constant_blob(); + constants_internal_offset_.resize(model->num_constants()); + model->compute_cuda_constant_blob(blob_size_, constants_internal_offset_); +#endif + + for (auto& model : models_) { + model->update_constants_map(constants_map_); + } + + in_spec_ = model->get_in_spec(); + out_spec_ = model->get_out_spec(); + } + + void run( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor) { + std::shared_lock model_lk(model_exec_mutex_); + auto* model = get_available_model(); + + if (!constant_folded_) { + // At this point, constant is not ready yet. We need to call constant + // folding before we execute the model. We obtain a unique lock at this + // point to make sure constant is ready for all. + model_lk.unlock(); + std::unique_lock constants_folding_lk(model_exec_mutex_); + // Double locking to make sure constant folding is only ran once. + if (!constant_folded_) { + auto folded_const_map = model->run_const_fold( + stream, proxy_executor, /* initialization = */ true); + update_constant_buffer( + folded_const_map, + /* use_inactive = */ false, + /* validate_full_update = */ false); + constant_folded_ = true; + } + constants_folding_lk.unlock(); + model_lk.lock(); + } + + try { + model->run(input_handles, output_handles, stream, proxy_executor); + } catch (...) 
{ + std::lock_guard lk(models_mutex_); + available_models_.push_back(model); + throw; + } + + { + std::lock_guard lk(models_mutex_); + pending_models_.push_back(model); + } + pending_models_available_.notify_one(); + } + + size_t num_constants() const { + if (this->num_models() == 0) { + throw std::runtime_error("No available models in container!"); + } + return models_[0]->num_constants(); + } + + // retrieve the constant name of constants_info_[idx] + const char* constant_name(size_t idx) const { + if (this->num_models() == 0) { + throw std::runtime_error("No available models in container!"); + } + return models_[0]->constant_name(idx); + } + + // retrieve original FQN of constants_info_[idx] + const char* constant_original_fqn(size_t idx) const { + if (this->num_models() == 0) { + throw std::runtime_error("No available models in container!"); + } + return models_[0]->constant_original_fqn(idx); + } + + // retrieve whether constant is from folded of constants_info_[idx] + bool constant_from_folded(size_t idx) const { + if (this->num_models() == 0) { + throw std::runtime_error("No available models in container!"); + } + return models_[0]->constant_from_folded(idx); + } + + // retrieve dtype of constants_info_[idx] + int32_t constant_dtype(size_t idx) const { + if (this->num_models() == 0) { + throw std::runtime_error("No available models in container!"); + } + return models_[0]->constant_dtype(idx); + } + + void run_const_fold( + bool inactive_buffer, + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor) { + std::shared_lock model_lk(model_exec_mutex_); + auto* model = get_available_model(); + + if (!inactive_buffer) { + // We would need to acquire a unique lock if we want to run constant + // folding on the active buffer. + model_lk.unlock(); + std::unique_lock constants_folding_lk(model_exec_mutex_); + try { + auto folded_const_map = model->run_const_fold(stream, proxy_executor); + update_constant_buffer( + folded_const_map, + /* use_inactive = */ false, + /* validate_full_update = */ false); + } catch (...) { + std::lock_guard lk(models_mutex_); + available_models_.push_back(model); + throw; + } + constants_folding_lk.unlock(); + model_lk.lock(); + } else { + // We swap the constant mapping to the inactive buffer in the model to run + // const run. + auto constants_map = get_constants_map(/* get_inactive= */ true); + auto constants_array = get_constants_array(/* get_inactive= */ true); + + try { + model->update_constants_map( + constants_map, /* remap_constants_array= */ false); + model->update_constants_array(constants_array); + + auto folded_const_map = model->run_const_fold(stream, proxy_executor); + update_constant_buffer( + folded_const_map, + /* use_inactive = */ true, + /* validate_full_update = */ false); + + // Swap back the model's constants mapping + constants_map = get_constants_map(/* get_inactive= */ false); + constants_array = get_constants_array(/* get_inactive= */ false); + model->update_constants_map( + constants_map, /* remap_constants_array= */ false); + model->update_constants_array(constants_array); + } catch (...) { + std::lock_guard lk(models_mutex_); + available_models_.push_back(model); + throw; + } + } + + { + std::lock_guard lk(models_mutex_); + pending_models_.push_back(model); + } + pending_models_available_.notify_one(); + } + + // This function updates the buffer for storing constants. + // It will update the buffer, the mapping and the array mapping. 
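A sketch (not part of this header) of the intended calling pattern for the double-buffered update implemented below, which only has an effect on CUDA builds since the body is guarded by USE_CUDA: stage new constants into the inactive buffer, then swap. The map's template arguments were lost in this rendering of the diff and are assumed here to be std::string keys and AtenTensorHandle values, matching what the function body looks up and dereferences; the constant name "fc.weight" is purely hypothetical:

#include <string>
#include <unordered_map>
#include <torch/csrc/inductor/aoti_runtime/model_container.h>

// `new_weight` would come from the aoti_torch_* shim; it is not created here.
void swap_in_new_weight(torch::aot_inductor::AOTInductorModelContainer& container,
                        AtenTensorHandle new_weight) {
  std::unordered_map<std::string, AtenTensorHandle> staged;
  staged.emplace("fc.weight", new_weight);  // hypothetical constant name

  // Write into the inactive buffer while inference keeps using the active one...
  container.update_constant_buffer(staged,
                                   /*use_inactive=*/true,
                                   /*validate_full_update=*/false);
  // ...then switch every model instance over to the freshly written buffer.
  container.swap_constant_buffer();
}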
+ void update_constant_buffer( + const std::unordered_map& constants_map, + bool use_inactive, + bool validate_full_update) { +#ifdef USE_CUDA + if (this->num_models() == 0) { + throw std::runtime_error("No model available in container!"); + } + auto num_constants = models_[0]->num_constants(); + + auto* constants_blob_ptr = + static_cast(get_constant_blob_ptr(use_inactive)); + auto constants_map_to_update = get_constants_map(use_inactive); + + if (validate_full_update) { + for (size_t idx = 0; idx < num_constants; idx++) { + if (models_[0]->constant_from_folded(idx)) { + continue; + } + + auto constant_name = std::string(models_[0]->constant_name(idx)); + auto it = constants_map.find(constant_name); + if (it == constants_map.end()) { + throw std::runtime_error( + std::string("Cannot find constants ") + constant_name + + std::string(" in constants_map!")); + } + } + } + + for (size_t idx = 0; idx < num_constants; idx++) { + auto constant_name = std::string(models_[0]->constant_name(idx)); + auto it = constants_map.find(constant_name); + if (it == constants_map.end()) { + continue; + } + + // Move the data to container handled blob. + uint8_t* internal_constants_ptr = + constants_blob_ptr + constants_internal_offset_[idx]; + void* user_constant_ptr; + int64_t constant_size; + aoti_torch_get_data_ptr(it->second, &user_constant_ptr); + aoti_torch_get_storage_size(it->second, &constant_size); + + AOTI_RUNTIME_DEVICE_CHECK(cudaMemcpy( + internal_constants_ptr, + user_constant_ptr, + constant_size, + cudaMemcpyDefault)); + + // Generate Tensor from container handled blob. + // We extract stride and offset from provided Tensor since we do not + // guarantee that the tensor is contiguous. + AtenTensorHandle tensor_handle; + int64_t* stride; + int64_t offset; + int device_idx = models_[0]->get_device_idx(); + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides(it->second, &stride)); + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_get_storage_offset(it->second, &offset)); + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob( + internal_constants_ptr, + models_[0]->constant_ndim(idx), + models_[0]->constant_shape(idx), + stride, + offset, + models_[0]->constant_dtype(idx), + aoti_torch_device_type_cuda(), + device_idx, + &tensor_handle)); + + // Now place the tensor to constants_map. Note at this point the ownership + // of the tensor_handle will be taken over. + constants_map_to_update->emplace(constant_name, tensor_handle); + } + + // Update the inactive constant array. 
+ update_array_from_map( + get_constants_array(use_inactive), constants_map_to_update); +#endif // USE_CUDA + } + + void update_array_from_map( + std::shared_ptr> constants_array, + std::shared_ptr constants_map) { + auto num_constants = models_[0]->num_constants(); + for (size_t idx = 0; idx < num_constants; idx++) { + if (constants_map->find(models_[0]->constant_name(idx)) != + constants_map->end()) { + constants_array->at(idx) = ConstantHandle( + constants_map->find(models_[0]->constant_name(idx))->second); + } + } + } + + void swap_constant_buffer() { + std::lock_guard unique_lk(model_exec_mutex_); + + auto constants_map = get_constants_map(/* get_inactive= */ true); + auto constants_array = get_constants_array(/* get_inactive= */ true); + + for (auto& model : models_) { + model->update_constants_map( + constants_map, /* remap_constants_array = */ false); + model->update_constants_array(constants_array); + } + + use_secondary_ = !use_secondary_; + } + + size_t num_inputs() const { + return input_names_.size(); + } + + size_t num_outputs() const { + return output_names_.size(); + } + + const char* input_name(size_t idx) const { + return input_names_.at(idx).c_str(); + } + + const char* output_name(size_t idx) const { + return output_names_.at(idx).c_str(); + } + + size_t num_models() const { + return models_.size(); + } + + const char* get_in_spec() const { + return in_spec_; + } + + const char* get_out_spec() const { + return out_spec_; + } + + private: + std::vector input_names_; + std::vector output_names_; + const char* in_spec_; + const char* out_spec_; + +#ifdef USE_CUDA + // Holds the blob storage for constants' at::Tensor for CUDA. + CUDAPtr constant_blob_; + CUDAPtr constant_blob_secondary_; + + // Let's place this within USE_CUDA at the moment before we fully support + // update for CPU cases. + size_t blob_size_; + std::vector constants_internal_offset_; +#endif // USE_CUDA + + // Determine which constants is being used for the model. + // If true, + // constants_map_secondary/constant_blob_secondary/constants_array_secondary + // is being used. + bool use_secondary_; + + // Determine whether we have ran constant folding + bool constant_folded_; + + // Holds the mapping of constants to at::Tensor. + // The underlying data of at::Tensor is in either constant_blob_ (for CUDA). + // or _binary_constants_bin_start (for CPU). + std::shared_ptr constants_map_; + std::shared_ptr constants_map_secondary_; + + // Holds the indexed array of constant for faster lookup during runtime. + std::shared_ptr> constants_array_; + std::shared_ptr> constants_array_secondary_; + + // Holds all the AOTInductorModel instances owned by this container. + std::vector> models_; + + // Holds the AOTInductorModel instances available for inference. + std::vector available_models_; + + // Holds the AOTInductorModel instances that have started running + // inference and can be placed onto available_models_ upon their + // completion. + std::deque pending_models_; + + // Protects available_models_ and pending_models_. + std::mutex models_mutex_; + + // Notified whenever a model is placed onto pending_models_. + std::condition_variable pending_models_available_; + + AOTInductorModel* get_available_model() { + std::unique_lock lk(models_mutex_); + if (available_models_.empty()) { + reclaim_finished_models(lk); + } + auto* result = available_models_.back(); + available_models_.pop_back(); + return result; + } + + // This mutex is used to protect execution of model. 
+ // We acquire the mutex in shared mode if we allow concurrent execution. + // We acquire the mutex in unique mode when we want exclusive access of the + // model. One such case is when we want to do a weight swapping. We want to + // make sure no one is executing the model. + std::shared_mutex model_exec_mutex_; + +#ifdef USE_CUDA + void* get_constant_blob_ptr(bool get_inactive) { + if ((get_inactive && use_secondary_) || + (!get_inactive && !use_secondary_)) { + return constant_blob_.get(); + } else { + if (!constant_blob_secondary_) { + constant_blob_secondary_ = RAII_cudaMalloc(blob_size_); + } + return constant_blob_secondary_.get(); + } + } +#endif // USE_CUDA + + std::shared_ptr get_constants_map(bool get_inactive) { + if ((get_inactive && use_secondary_) || + (!get_inactive && !use_secondary_)) { + return constants_map_; + } else { + if (!constants_map_secondary_) { + constants_map_secondary_ = std::make_shared(); + } + return constants_map_secondary_; + } + } + + std::shared_ptr> get_constants_array( + bool get_inactive) { + if ((get_inactive && use_secondary_) || + (!get_inactive && !use_secondary_)) { + return constants_array_; + } else { + if (!constants_array_secondary_) { + constants_array_secondary_ = + std::make_shared>( + models_[0]->num_constants()); + } + return constants_array_secondary_; + } + } + + void reclaim_finished_models(std::unique_lock& lk) { + // push finished model instances to the end of pending_models_ + auto it = std::stable_partition( + pending_models_.begin(), + pending_models_.end(), + [](AOTInductorModel* m) { return !m->is_finished(); }); + + if (it != pending_models_.end()) { + // We have finished model instances that can be pushed into + // available_models_ so that we don't have to be blocked on waiting + // the pending_models_available_ condition. + available_models_.insert( + available_models_.end(), it, pending_models_.end()); + pending_models_.erase(it, pending_models_.end()); + return; + } + + pending_models_available_.wait( + lk, [this]() { return !pending_models_.empty(); }); + // Let's make the schedule simple first. We always wait on the first + // pending_models_ to be complete. + auto* model = pending_models_.front(); + pending_models_.pop_front(); + lk.unlock(); + try { + model->wait_for_completion(); + } catch (...) 
{ + lk.lock(); + available_models_.push_back(model); + throw; + } + lk.lock(); + available_models_.push_back(model); + } +}; + +} // namespace aot_inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/scalar_to_tensor.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/scalar_to_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..5f836a4fac2e24378a3a7ab4b49a9f4c7ac93fa9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/scalar_to_tensor.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace aot_inductor { + +template +inline RAIIAtenTensorHandle scalar_to_tensor_handle(T value) { + throw std::runtime_error("Unsupported scalar_to_tensor_handle"); +} + +// Specialize for supported C++ primitive types +#define AOTI_RUNTIME_SCALAR_TO_TENSOR(dtype, ctype) \ + template <> \ + inline RAIIAtenTensorHandle scalar_to_tensor_handle(ctype value) { \ + AtenTensorHandle tensor_handle; \ + AOTI_TORCH_ERROR_CODE_CHECK( \ + aoti_torch_scalar_to_tensor_##dtype(value, &tensor_handle)); \ + return RAIIAtenTensorHandle(tensor_handle); \ + } + +AOTI_RUNTIME_SCALAR_TO_TENSOR(float32, float) +AOTI_RUNTIME_SCALAR_TO_TENSOR(float64, double) +AOTI_RUNTIME_SCALAR_TO_TENSOR(uint8, uint8_t) +AOTI_RUNTIME_SCALAR_TO_TENSOR(uint16, uint16_t) +AOTI_RUNTIME_SCALAR_TO_TENSOR(uint32, uint32_t) +AOTI_RUNTIME_SCALAR_TO_TENSOR(uint64, uint64_t) +AOTI_RUNTIME_SCALAR_TO_TENSOR(int8, int8_t) +AOTI_RUNTIME_SCALAR_TO_TENSOR(int16, int16_t) +AOTI_RUNTIME_SCALAR_TO_TENSOR(int32, int32_t) +AOTI_RUNTIME_SCALAR_TO_TENSOR(int64, int64_t) +AOTI_RUNTIME_SCALAR_TO_TENSOR(bool, bool) +#undef AOTI_RUNTIME_SCALAR_TO_TENSOR + +} // namespace aot_inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/thread_local.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/thread_local.h new file mode 100644 index 0000000000000000000000000000000000000000..c48bb4b9345eada64f16f87862cf9dac26e84d6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/thread_local.h @@ -0,0 +1,158 @@ +#pragma once + +#include + +namespace torch { +namespace aot_inductor { + +template +struct ThreadLocalCachedOutputTensor; + +template <> +struct ThreadLocalCachedOutputTensor { + explicit ThreadLocalCachedOutputTensor(const RAIIAtenTensorHandle&) {} + void copy_data_from(const RAIIAtenTensorHandle& handle) { + throw std::runtime_error("can't happen"); + } + + AtenTensorHandle tensor() const { + throw std::runtime_error("can't happen"); + } +}; + +template <> +struct ThreadLocalCachedOutputTensor { + explicit ThreadLocalCachedOutputTensor(const AtenTensorHandle&) {} + void copy_data_from(const AtenTensorHandle& handle) { + throw std::runtime_error("can't happen"); + } + + AtenTensorHandle tensor() const { + throw std::runtime_error("can't happen"); + } +}; + +template <> +struct ThreadLocalCachedOutputTensor { + explicit ThreadLocalCachedOutputTensor(const ConstantHandle&) {} + void copy_data_from(const ConstantHandle& handle) { + throw std::runtime_error("can't happen"); + } + + AtenTensorHandle tensor() const { + throw std::runtime_error("can't happen"); + } +}; + +template +struct ThreadLocalCachedOutputTensor> { + explicit ThreadLocalCachedOutputTensor(const ArrayRefTensor& t) { + realloc(t); + } + + void copy_data_from(const 
ArrayRefTensor& t) { + if (t.numel() > capacity_) { + realloc(t); + } + std::copy(t.data(), t.data() + t.numel(), storage_.get()); + } + + AtenTensorHandle tensor() const { + return tensor_.get(); + } + + private: + void realloc(const ArrayRefTensor& t) { + capacity_ = t.numel(); + storage_ = std::make_unique(t.numel()); + AtenTensorHandle handle; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob( + storage_.get(), + t.sizes().size(), + t.sizes().data(), + t.strides().data(), + 0, + aoti_torch_dtype>(), + t.device_type(), + t.device_idx(), + &handle)); + tensor_ = handle; + } + + std::unique_ptr storage_; + size_t capacity_ = 0; + RAIIAtenTensorHandle tensor_; +}; + +template +struct ThreadLocalCachedOutputArray; + +// Just needs to compile, doesn't need to do anything. +template <> +struct ThreadLocalCachedOutputArray { + explicit ThreadLocalCachedOutputArray(const RAIIAtenTensorHandle&) { + throw std::runtime_error("can't happen"); + } + + // Not supported yet! We would need to put contiguous() or + // expect_contiguous() into the ABI. + void copy_data_from(const RAIIAtenTensorHandle&) { + throw std::runtime_error("can't happen"); + } + + template + ArrayRefTensor arrayref_tensor() const { + throw std::runtime_error("can't happen"); + } +}; + +// Just needs to compile, doesn't need to do anything. +template <> +struct ThreadLocalCachedOutputArray { + explicit ThreadLocalCachedOutputArray(const ConstantHandle&) { + throw std::runtime_error("can't happen"); + } + + // Not supported yet! We would need to put contiguous() or + // expect_contiguous() into the ABI. + void copy_data_from(const ConstantHandle&) { + throw std::runtime_error("can't happen"); + } + + template + ArrayRefTensor arrayref_tensor() const { + throw std::runtime_error("can't happen"); + } +}; + +template +struct ThreadLocalCachedOutputArray> { + explicit ThreadLocalCachedOutputArray(const ArrayRefTensor& t) {} + + template < + typename U, + std::enable_if_t< + std::is_same_v, std::remove_const_t>, + bool> = true> + ArrayRefTensor arrayref_tensor() const { + return tensor_; + } + + void copy_data_from(const ArrayRefTensor& t) { + if (t.numel() > capacity_) { + capacity_ = t.numel(); + storage_ = std::make_unique(capacity_); + } + std::copy(t.data(), t.data() + t.numel(), storage_.get()); + tensor_ = t; + tensor_.set_arrayref(MiniArrayRef(storage_.get(), t.numel())); + } + + private: + std::unique_ptr storage_; + uint32_t capacity_ = 0; + ArrayRefTensor tensor_; +}; + +} // namespace aot_inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..8020004b06bc95f6ba60a5ecce7fb9ded44b4116 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/utils.h @@ -0,0 +1,177 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +// WARNING: Be careful when adding new includes here. This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. 
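A minimal sketch (not part of this header) of the RAII pattern the utilities below provide: a raw AtenTensorHandle is wrapped in RAIIAtenTensorHandle, queried, and freed automatically when it leaves scope. make_tensor_somehow() is a hypothetical stand-in for whichever aoti_torch_* factory produced the handle:

#include <cstdint>
#include <torch/csrc/inductor/aoti_runtime/utils.h>

AtenTensorHandle make_tensor_somehow();  // hypothetical shim factory

int64_t first_dim_size() {
  // The wrapper steals ownership and calls aoti_torch_delete_tensor_object
  // from its destructor, so no manual cleanup is needed.
  torch::aot_inductor::RAIIAtenTensorHandle t(make_tensor_somehow());
  return t.size(0);  // failures surface as exceptions via AOTI_TORCH_ERROR_CODE_CHECK
}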
+#include <torch/csrc/inductor/aoti_torch/c/shim.h>
+
+#if defined(__GNUC__) || defined(__clang__)
+#define AOTI_NOINLINE __attribute__((noinline))
+#elif _MSC_VER
+#define AOTI_NOINLINE __declspec(noinline)
+#else
+#define AOTI_NOINLINE
+#endif
+
+AOTI_NOINLINE static void throw_exception(
+    const char* call,
+    const char* file,
+    int64_t line) {
+  std::stringstream ss;
+  ss << call << " API call failed at " << file << ", line " << line;
+  throw std::runtime_error(ss.str());
+}
+
+#define AOTI_TORCH_ERROR_CODE_CHECK(call)       \
+  if ((call) != AOTI_TORCH_SUCCESS) {           \
+    throw_exception(#call, __FILE__, __LINE__); \
+  }
+
+using AOTIRuntimeError = int32_t;
+#define AOTI_RUNTIME_SUCCESS 0
+#define AOTI_RUNTIME_FAILURE 1
+
+#define AOTI_RUNTIME_ERROR_CODE_CHECK(call)     \
+  if ((call) != AOTI_RUNTIME_SUCCESS) {         \
+    throw_exception(#call, __FILE__, __LINE__); \
+  }
+
+namespace torch::aot_inductor {
+
+using DeleterFnPtr = void (*)(void*);
+
+inline void noop_deleter(void*) {}
+
+inline void delete_tensor_object(void* ptr) {
+  AOTI_TORCH_ERROR_CODE_CHECK(
+      aoti_torch_delete_tensor_object(reinterpret_cast<AtenTensorHandle>(ptr)));
+}
+
+// RAIIAtenTensorHandle steals the tensor objects created by the libtorch C ABI
+class RAIIAtenTensorHandle {
+ public:
+  RAIIAtenTensorHandle() : handle_(nullptr, noop_deleter) {}
+  RAIIAtenTensorHandle(const RAIIAtenTensorHandle& other) = delete;
+  RAIIAtenTensorHandle& operator=(const RAIIAtenTensorHandle& other) = delete;
+
+  // Steal the ownership from another RAIIAtenTensorHandle using std::move
+  RAIIAtenTensorHandle(RAIIAtenTensorHandle&& other) = default;
+  RAIIAtenTensorHandle& operator=(RAIIAtenTensorHandle&& other) = default;
+
+  // Steal the ownership from raw AtenTensorHandle
+  RAIIAtenTensorHandle(AtenTensorHandle handle)
+      : handle_(handle, delete_tensor_object) {}
+
+  ~RAIIAtenTensorHandle() {
+    handle_.reset();
+  }
+
+  // Return a raw AtenTensorHandle to be used by aoti_torch functions
+  // Note: this function does NOT transfer the ownership of the handle
+  operator AtenTensorHandle() const {
+    return handle_.get();
+  }
+
+  AtenTensorHandle release() {
+    return handle_.release();
+  }
+
+  AtenTensorHandle get() const {
+    return handle_.get();
+  }
+
+  void reset() {
+    handle_.reset();
+  }
+
+  int64_t size(int64_t d) {
+    int64_t size;
+    AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_size(handle_.get(), d, &size));
+    return size;
+  }
+
+  int64_t stride(int64_t d) {
+    int64_t stride;
+    AOTI_TORCH_ERROR_CODE_CHECK(
+        aoti_torch_get_stride(handle_.get(), d, &stride));
+    return stride;
+  }
+
+  int64_t storage_offset() {
+    int64_t storage_offset;
+    AOTI_TORCH_ERROR_CODE_CHECK(
+        aoti_torch_get_storage_offset(handle_.get(), &storage_offset));
+    return storage_offset;
+  }
+
+ private:
+  std::unique_ptr<AtenTensorOpaque, DeleterFnPtr> handle_;
+};
+
+// Steal the ownership from raw AtenTensorHandle to RAIIAtenTensorHandle
+inline std::vector<RAIIAtenTensorHandle> steal_from_raw_handles_to_raii_handles(
+    AtenTensorHandle* handles,
+    size_t size) {
+  std::vector<RAIIAtenTensorHandle> result;
+  result.reserve(size);
+  for (size_t i = 0; i < size; i++) {
+    result.emplace_back(handles[i]);
+    handles[i] = nullptr;
+  }
+  return result;
+}
+
+class ConstantHandle {
+ public:
+  ConstantHandle() = default;
+
+  explicit ConstantHandle(AtenTensorHandle handle) : handle_(handle) {
+    AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(handle_, &data_));
+  }
+
+  operator AtenTensorHandle() const {
+    return handle_;
+  }
+
+  AtenTensorHandle tensor() const {
+    return handle_;
+  }
+
+  void* data_ptr() const {
+    return data_;
+  }
+
+ private:
+  AtenTensorHandle handle_;
+  void* data_ = nullptr;
+};
+
+inline void*
get_data_ptr_wrapper(const ConstantHandle& constant) { + return constant.data_ptr(); +} + +inline const ConstantHandle& unwrap_raii_handle_if_needed( + const ConstantHandle& handle) { + return handle; +} + +// Shouldn't be called. +inline AtenTensorHandle wrap_with_raii_handle_if_needed( + const ConstantHandle& handle) = delete; + +#define CACHE_TORCH_DTYPE(typename) \ + static auto cached_torch_dtype_##typename = aoti_torch_dtype_##typename() + +#define CACHE_TORCH_DEVICE(device) \ + static auto cached_torch_device_type_##device = \ + aoti_torch_device_type_##device() + +} // namespace torch::aot_inductor diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/utils_cuda.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/utils_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..0593a2a5a7af59ea82514c03a3a741fb0522692d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/utils_cuda.h @@ -0,0 +1,58 @@ +#pragma once + +#ifdef USE_CUDA +// WARNING: Be careful when adding new includes here. This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. +#include + +#include +#include + +namespace torch::aot_inductor { + +inline void delete_cuda_guard(void* ptr) { + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_delete_cuda_guard(reinterpret_cast(ptr))); +} + +inline void delete_cuda_stream_guard(void* ptr) { + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_delete_cuda_stream_guard( + reinterpret_cast(ptr))); +} + +class AOTICudaGuard { + public: + AOTICudaGuard(int32_t device_index) : guard_(nullptr, delete_cuda_guard) { + CUDAGuardHandle ptr; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_create_cuda_guard(device_index, &ptr)); + guard_.reset(ptr); + } + + void set_index(int32_t device_index) { + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_cuda_guard_set_index(guard_.get(), device_index)); + } + + private: + std::unique_ptr guard_; +}; + +class AOTICudaStreamGuard { + public: + AOTICudaStreamGuard(cudaStream_t stream, int32_t device_index) + : guard_(nullptr, delete_cuda_stream_guard) { + CUDAStreamGuardHandle ptr; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_create_cuda_stream_guard(stream, device_index, &ptr)); + guard_.reset(ptr); + } + + private: + std::unique_ptr guard_; +}; + +} // namespace torch::aot_inductor +#endif // USE_CUDA diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/c/shim.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/c/shim.h new file mode 100644 index 0000000000000000000000000000000000000000..2995e8151fabb0454b718d7be486a6922cb2d6d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/c/shim.h @@ -0,0 +1,571 @@ +#ifndef AOTI_TORCH_SHIM +#define AOTI_TORCH_SHIM + +#include +#include + +// This header defines a stable C API for certain ATen functionality in +// libtorch. The AOTInductor compiled model.so will only refer to this header +// instead of other headers from aten/c10, which means it will NOT be able to +// directly use any data structures or call functions from libtorch. +// +// What problems are we trying to solve here? 
Direct use of aten/c10 APIs +// means use of C++ APIs on a library that doesn't have any ABI compatibility +// guarantees. However, we want model.so to remain usable across updates +// to the PyTorch C++ libraries, which requires a stable ABI. By introducing +// a C shim layer, we can minimize the surface that will cause breakage. The +// corresponding software stack can be illustrated as follows: +// +// |--------------------------------| +// | inference service code | +// |--------------------------------| +// | model.so | +// |--------------|-----------------| +// | | +// | libtorch.so | +// |--------------------------------| +// +// The general guidelines for the C API: +// +// - No exceptions, return an explicit error code to be checked at call site +// - Only pointers (AtenTensorHandle counts), integers and floats in headers +// +// If you want to make changes to this header, you MUST MAINTAIN ABI +// compatibility. Typically, this means you will have to add a _v2 version +// of a function that you, e.g., want to add a new function parameter to, and +// maintain the old and new versions of the APIs until all old model.so +// go out of use. + +#ifdef __GNUC__ +#define AOTI_TORCH_EXPORT __attribute__((__visibility__("default"))) +#else // !__GNUC__ +#ifdef _WIN32 +#define AOTI_TORCH_EXPORT __declspec(dllexport) +#else // !_WIN32 +#define AOTI_TORCH_EXPORT +#endif // _WIN32 +#endif // __GNUC__ + +#ifdef __cplusplus +extern "C" { +#endif + +// AtenTensorHandle represents an abstract notion of Tensor that can be passed +// between model.so and libtorch.so. The contents of the structure itself +// are private; model.so is not allowed to access any fields directly, it must +// go through functions defined in this ABI. Under the hood, this is +// represented as at::Tensor*, but we reserve the right to change this (and in +// fact, we probably should change it to at::TensorImpl* at least). +// +// An AtenTensorHandle can be owning (please check the API reference for exact +// ownership/borrow semantics). If you have an owning AtenTensorHandle +// in model.so, you are obligated to aoti_torch_delete_tensor_object when you +// are done. You can use the helper C++ class RAIIAtenTensorHandle +// (see aot_runtime/model.h) to ensure the deallocator is called in RAII style +// (note that RAIIAtenTensorHandle is private to model.so, and never crosses +// the ABI boundary.) +struct AtenTensorOpaque; +using AtenTensorHandle = AtenTensorOpaque*; + +struct AOTIProxyExecutorOpaque; +using AOTIProxyExecutorHandle = AOTIProxyExecutorOpaque*; + +using AOTITorchError = int32_t; +#define AOTI_TORCH_SUCCESS 0 +#define AOTI_TORCH_FAILURE 1 + +// Getter functions for retrieving various constants from the runtime, that +// can subsequently be passed to other aoti_* functions. By hiding these +// behind functions, the precise value of device/dtype is NOT part of the +// ABI contract. (In practice, aten/c10 is pretty good about not renumbering +// these, so we probably could later switch to having these in the ABI, if +// desired for perf reasons.) 
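// [Editorial sketch; not part of the upstream header.] Since the numeric codes
// are only reachable through these getters, callers usually fetch them once
// and reuse them; utils.h earlier in this diff defines CACHE_TORCH_DTYPE and
// CACHE_TORCH_DEVICE for exactly that purpose. A minimal hypothetical example:
//
//   static const int32_t kFloat32 = aoti_torch_dtype_float32();
//   static const int32_t kCpuDevice = aoti_torch_device_type_cpu();
//   // kFloat32 / kCpuDevice are later passed as the dtype and device_type
//   // arguments of functions such as aoti_torch_empty_strided.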
+AOTI_TORCH_EXPORT int32_t aoti_torch_device_type_cpu(); +AOTI_TORCH_EXPORT int32_t aoti_torch_device_type_cuda(); + +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float8_e5m2(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float8_e4m3fn(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float8_e5m2fnuz(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float8_e4m3fnuz(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_bfloat16(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float16(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float32(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_float64(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_uint8(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_uint16(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_uint32(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_uint64(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_int8(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_int16(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_int32(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_int64(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_bool(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_complex32(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_complex64(); +AOTI_TORCH_EXPORT int32_t aoti_torch_dtype_complex128(); + +// Functions for converting a single-element tensor to a scalar value +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_float32(AtenTensorHandle tensor, float* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_float64(AtenTensorHandle tensor, double* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_uint8(AtenTensorHandle tensor, uint8_t* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_uint16(AtenTensorHandle tensor, uint16_t* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_uint32(AtenTensorHandle tensor, uint32_t* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_uint64(AtenTensorHandle tensor, uint64_t* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_int8(AtenTensorHandle tensor, int8_t* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_int16(AtenTensorHandle tensor, int16_t* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_int32(AtenTensorHandle tensor, int32_t* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_int64(AtenTensorHandle tensor, int64_t* ret_value); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_item_bool(AtenTensorHandle tensor, bool* ret_value); + +// Functions for wrapping a scalar value to a single-element tensor +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_float32( + float value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_float64( + double value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_uint8( + uint8_t value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_uint16( + uint16_t value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_uint32( + uint32_t value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_uint64( + uint64_t value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_int8( + int8_t value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_int16( + int16_t value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_scalar_to_tensor_int32( + int32_t value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scalar_to_tensor_int64( + int64_t value, + AtenTensorHandle* ret_new_tensor); +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_scalar_to_tensor_bool(bool value, AtenTensorHandle* ret_new_tensor); + +AOTI_TORCH_EXPORT bool aoti_torch_grad_mode_is_enabled(); +AOTI_TORCH_EXPORT void aoti_torch_grad_mode_set_enabled(bool enabled); + +// Free the tensor object +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_delete_tensor_object(AtenTensorHandle tensor); + +// Get a pointer to the underlying storage data +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_data_ptr( + AtenTensorHandle tensor, + void** ret_data_ptr // returns borrowed reference +); + +// Get the nbytes of the underlying storage +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_storage_size(AtenTensorHandle tensor, int64_t* ret_size); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_dim(AtenTensorHandle tensor, int64_t* ret_dim); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_numel(AtenTensorHandle tensor, int64_t* ret_numel); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_sizes( + AtenTensorHandle tensor, + int64_t** ret_sizes // returns borrowed reference +); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_size(AtenTensorHandle tensor, int64_t d, int64_t* ret_size); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_strides( + AtenTensorHandle tensor, + int64_t** ret_strides // returns borrowed reference +); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_stride(AtenTensorHandle tensor, int64_t d, int64_t* ret_stride); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_dtype(AtenTensorHandle tensor, int32_t* ret_dtype); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_device_type(AtenTensorHandle tensor, int32_t* ret_device_type); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_device_index(AtenTensorHandle tensor, int32_t* ret_device_index); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_get_storage_offset( + AtenTensorHandle tensor, + int64_t* ret_storage_offset); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch__alloc_from_pool( + AtenTensorHandle self, + int64_t offset_bytes, + int32_t dtype, + int64_t ndim, + const int64_t* sizes_ptr, + const int64_t* strides_ptr, + AtenTensorHandle* ret_new_tensor); + +// This function will create a new tensor object and its pointer is returned +// through *out. The caller is responsible for wrapping the tensor pointer +// with RAIIAtenTensorHandle which will call aoti_torch_delete_tensor_object +// when going out of scope. +AOTI_TORCH_EXPORT AOTITorchError aoti_torch__reinterpret_tensor( + AtenTensorHandle self, + int64_t ndim, + const int64_t* sizes_ptr, + const int64_t* strides_ptr, + int64_t storage_offset, + AtenTensorHandle* ret_new_tensor // returns new reference +); + +// This function will create a new tensor object and its pointer is returned +// through *out. The caller is responsible for wrapping the tensor pointer +// with RAIIAtenTensorHandle which will call aoti_torch_delete_tensor_object +// when going out of scope. 
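// [Editorial sketch; not part of the upstream header.] For example, a caller
// of the function declared next could allocate a contiguous 2x3 float tensor
// on CPU and hand ownership to RAIIAtenTensorHandle (sizes/strides invented):
//
//   int64_t sizes[] = {2, 3};
//   int64_t strides[] = {3, 1};
//   AtenTensorHandle raw = nullptr;
//   AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_empty_strided(
//       /*ndim=*/2, sizes, strides,
//       aoti_torch_dtype_float32(),
//       aoti_torch_device_type_cpu(),
//       /*device_index=*/0,
//       &raw));
//   RAIIAtenTensorHandle out(raw);  // calls aoti_torch_delete_tensor_object on scope exit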
+AOTI_TORCH_EXPORT AOTITorchError aoti_torch_empty_strided( + int64_t ndim, + const int64_t* sizes_ptr, + const int64_t* strides_ptr, + int32_t dtype, + int32_t device_type, + int32_t device_index, + AtenTensorHandle* ret_new_tensor // returns new reference +); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob( + void* data, + int64_t ndim, + const int64_t* sizes_ptr, + const int64_t* strides_ptr, + int64_t storage_offset, + int32_t dtype, + int32_t device_type, + int32_t device_index, + AtenTensorHandle* ret // returns new reference +); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch__embedding_bag( + AtenTensorHandle weight, + AtenTensorHandle indices, + AtenTensorHandle offsets, + int32_t scale_grad_by_freq, + int32_t mode, + int32_t sparse, + AtenTensorHandle per_sample_weights, // optional argument + int32_t include_last_offset, + int32_t padding_idx, + AtenTensorHandle* ret0, // returns new reference + AtenTensorHandle* ret1, // returns new reference + AtenTensorHandle* ret2, // returns new reference + AtenTensorHandle* ret3 // returns new reference +); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch__fft_c2c( + AtenTensorHandle self, + const int64_t* dim_ptr, + int64_t dim_size, + int64_t normalization, + int32_t forward, + AtenTensorHandle* ret // returns new reference +); + +// This version is deprecated. We will remove it later +AOTI_TORCH_EXPORT AOTITorchError aoti_torch__scaled_dot_product_flash_attention( + AtenTensorHandle query, + AtenTensorHandle key, + AtenTensorHandle value, + double dropout_p, + bool is_causal, + bool return_debug_mask, + double scale, + AtenTensorHandle* ret0, // returns new reference + AtenTensorHandle* ret1, // returns new reference + AtenTensorHandle* ret2, // returns new reference + AtenTensorHandle* ret3, // returns new reference + int64_t* ret4, + int64_t* ret5, + AtenTensorHandle* ret6, // returns new reference + AtenTensorHandle* ret7, // returns new reference + AtenTensorHandle* ret8 // returns new reference +); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch__scaled_dot_product_flash_attention_v2( + AtenTensorHandle query, + AtenTensorHandle key, + AtenTensorHandle value, + double dropout_p, + int is_causal, + int return_debug_mask, + double* scale, // optional argument + AtenTensorHandle* ret0, // returns new reference + AtenTensorHandle* ret1, // returns new reference + AtenTensorHandle* ret2, // returns new reference + AtenTensorHandle* ret3, // returns new reference + int64_t* ret4, + int64_t* ret5, + AtenTensorHandle* ret6, // returns new reference + AtenTensorHandle* ret7, // returns new reference + AtenTensorHandle* ret8 // returns new reference +); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch__scaled_dot_product_efficient_attention( + AtenTensorHandle query, + AtenTensorHandle key, + AtenTensorHandle value, + AtenTensorHandle attn_bias, // optional argument + int compute_log_sumexp, + double dropout_p, + int is_causal, + double* scale, // optional argument + AtenTensorHandle* ret0, // returns new reference + AtenTensorHandle* ret1, // returns new reference + AtenTensorHandle* ret2, // returns new reference + AtenTensorHandle* ret3 // returns new reference +); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch__scaled_mm( + AtenTensorHandle self, + AtenTensorHandle mat2, + AtenTensorHandle bias, + int32_t* out_dtype, + AtenTensorHandle scale_a, + AtenTensorHandle scale_b, + AtenTensorHandle scale_result, + int8_t use_fast_accum, + AtenTensorHandle* ret0, + AtenTensorHandle* ret1); + +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_convolution( + AtenTensorHandle input, + AtenTensorHandle weight, + AtenTensorHandle bias, // optional argument + const int64_t* stride_ptr, + int64_t stride_size, + const int64_t* padding_ptr, + int64_t padding_size, + const int64_t* dilation_ptr, + int64_t dilation_size, + int transposed, + const int64_t* output_padding_ptr, + int64_t output_padding_size, + int64_t groups, + AtenTensorHandle* ret // returns new reference +); + +// This function will create a new uninitialized tensor object +// and its pointer is returned through *ret. +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_new_uninitialized_tensor(AtenTensorHandle* ret); + +// WARNING: This will be deprecated. Use aoti_torch_copy_ instead. +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_tensor_copy_(AtenTensorHandle src, AtenTensorHandle dst); + +// Make the tensor referred to by dst an alias for the tensor referred +// to by src. The two tensors must still be deleted with +// aoti_torch_delete_tensor separately (or not) as before the call. +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_assign_tensors(AtenTensorHandle src, AtenTensorHandle dst); + +// Make a shallow copy of the tensor referred to by src and assign +// it to the handle in the ret_dst. This is similar to the above +// aoti_torch_assign_tensors function, but creates and sets the +// ret_dst from within. +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_assign_tensors_out(AtenTensorHandle src, AtenTensorHandle* ret_dst); + +// This function will create a new tensor object and its pointer is returned +// through *ret. The caller is responsible for wrapping the tensor pointer +// with RAIIAtenTensorHandle which will call aoti_torch_delete_tensor_object +// when going out of scope. +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_clone(AtenTensorHandle self, AtenTensorHandle* ret); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_addmm_out( + AtenTensorHandle out, + AtenTensorHandle self, + AtenTensorHandle mat1, + AtenTensorHandle mat2, + float beta, + float alpha); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_bmm_out( + AtenTensorHandle out, + AtenTensorHandle self, + AtenTensorHandle mat2); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_copy_( + AtenTensorHandle self, + AtenTensorHandle src, + int32_t non_blocking); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_mm_out( + AtenTensorHandle out, + AtenTensorHandle self, + AtenTensorHandle mat2); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_nonzero(AtenTensorHandle self, AtenTensorHandle* out); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_repeat_interleave_Tensor( + AtenTensorHandle repeats, + int64_t* output_size, + AtenTensorHandle* out); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_check_inf_and_nan(AtenTensorHandle tensor); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scatter_out( + AtenTensorHandle out, + AtenTensorHandle self, + int64_t dim, + AtenTensorHandle index, + AtenTensorHandle src); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_scatter_reduce_out( + AtenTensorHandle out, + AtenTensorHandle self, + int64_t dim, + AtenTensorHandle index, + AtenTensorHandle src, + const char* reduce, + int32_t include_self); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_index_put_out( + AtenTensorHandle out, + AtenTensorHandle self, + const AtenTensorHandle* indices, + const uint32_t num_indices, + const AtenTensorHandle values, + bool accumulate); + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_view_as_real( + AtenTensorHandle self, + AtenTensorHandle* ret // returns new reference +); + +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_view_dtype( + AtenTensorHandle self, + int32_t dtype, + AtenTensorHandle* ret // returns new reference +); + +AOTI_TORCH_EXPORT void aoti_torch_print_tensor_handle( + AtenTensorHandle self, + const char* msg); + +#ifdef USE_CUDA + +struct CUDAGuardOpaque; +using CUDAGuardHandle = CUDAGuardOpaque*; + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_cuda_guard( + int32_t device_index, + CUDAGuardHandle* ret_guard // returns new reference +); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_delete_cuda_guard(CUDAGuardHandle guard); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_cuda_guard_set_index(CUDAGuardHandle guard, int32_t device_index); + +struct CUDAStreamGuardOpaque; +using CUDAStreamGuardHandle = CUDAStreamGuardOpaque*; + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_create_cuda_stream_guard( + void* stream, + int32_t device_index, + CUDAStreamGuardHandle* ret_guard // returns new reference +); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_delete_cuda_stream_guard(CUDAStreamGuardHandle guard); + +AOTI_TORCH_EXPORT AOTITorchError +aoti_torch_get_current_cuda_stream(int32_t device_index, void** ret_stream); + +#endif + +// See `ProxyExecutor Design Note` in ir.py for more details +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_proxy_executor_call_function( + AOTIProxyExecutorHandle proxy_executor, + int extern_node_index, + int num_ints, + int64_t* flatten_int_args, + int num_tensors, + AtenTensorHandle* flatten_tensor_args); + +AOTI_TORCH_EXPORT void aoti_torch_check( + bool cond, + const char* func, + const char* file, + uint32_t line, + const char* msg); + +#ifdef STRIP_ERROR_MESSAGES +#define AOTI_TORCH_CHECK(cond, ...) \ + aoti_torch_check( \ + cond, \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); +#else +#define AOTI_TORCH_CHECK(cond, ...) \ + aoti_torch_check( \ + cond, \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); +#endif + +#ifdef __cplusplus +} // extern "C" + +template +int32_t aoti_torch_dtype(); + +#define DEFINE_DTYPE_SPECIALIZATION(ctype, typename) \ + template <> \ + inline int32_t aoti_torch_dtype() { \ + return aoti_torch_dtype_##typename(); \ + } + +// REVIEW: bfloat16 and half don't seem to actually build? Do I have +// the wrong types? 
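// [Editorial sketch; not part of the upstream header.] The specializations
// below let C++ runtime code map a scalar type to its dtype code generically;
// for instance aoti_torch_dtype<float>() yields the same value as
// aoti_torch_dtype_float32(). thread_local.h earlier in this diff relies on
// this when rebuilding a cached output with aoti_torch_create_tensor_from_blob.
// A hypothetical helper:
//
//   template <typename T>
//   int32_t dtype_code_for_buffer() {
//     return aoti_torch_dtype<std::remove_const_t<T>>();
//   }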
+// DEFINE_DTYPE_SPECIALIZATION(__bfloat16, bfloat16) +// DEFINE_DTYPE_SPECIALIZATION(half, float16) +DEFINE_DTYPE_SPECIALIZATION(float, float32) +DEFINE_DTYPE_SPECIALIZATION(double, float64) +DEFINE_DTYPE_SPECIALIZATION(uint8_t, uint8) +DEFINE_DTYPE_SPECIALIZATION(int8_t, int8) +DEFINE_DTYPE_SPECIALIZATION(int16_t, int16) +DEFINE_DTYPE_SPECIALIZATION(int32_t, int32) +DEFINE_DTYPE_SPECIALIZATION(int64_t, int64) +DEFINE_DTYPE_SPECIALIZATION(bool, bool) + +#endif + +#endif // AOTI_TORCH_SHIM diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..5318fb6574a9f0358fbb7abae9fe6437118ed84d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.h @@ -0,0 +1,1280 @@ + +#pragma once + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fw_primal(AtenTensorHandle self, int64_t level, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__make_dual(AtenTensorHandle primal, AtenTensorHandle tangent, int64_t level, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__new_zeros_with_same_feature_meta(AtenTensorHandle self, AtenTensorHandle other, int64_t self_num_batch_dims, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__has_same_storage_numel(AtenTensorHandle self, AtenTensorHandle other, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__assert_async(AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__assert_async_msg(AtenTensorHandle self, const char* assert_msg); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__assert_scalar(double self, const char* assert_msg); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__functional_assert_scalar(double self, const char* assert_msg, AtenTensorHandle dep_token, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__functional_assert_async_msg(AtenTensorHandle self, const char* assert_msg, AtenTensorHandle dep_token, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__print(const char* s); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sym_constrain_range(double size, int64_t* min, int64_t* max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sym_constrain_range_for_size(double size, int64_t* min, int64_t* max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__functional_sym_constrain_range(double size, int64_t* min, int64_t* max, AtenTensorHandle dep_token, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__functional_sym_constrain_range_for_size(double size, int64_t* min, int64_t* max, AtenTensorHandle dep_token, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__make_dep_token(int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_dropout(AtenTensorHandle input, double p, int32_t* train, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_abs_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_as_real(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu_view_as_complex(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__conj(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__conj_physical(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__neg_view(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__add_relu_Tensor(AtenTensorHandle self, AtenTensorHandle other, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__add_relu__Tensor(AtenTensorHandle self, AtenTensorHandle other, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__add_relu_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__add_relu_Scalar(AtenTensorHandle self, double other, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__add_relu__Scalar(AtenTensorHandle self, double other, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addmv_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat, AtenTensorHandle vec, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addr(AtenTensorHandle self, AtenTensorHandle vec1, AtenTensorHandle vec2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addr_(AtenTensorHandle self, AtenTensorHandle vec1, AtenTensorHandle vec2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addr_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle vec1, AtenTensorHandle vec2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_affine_grid_generator(AtenTensorHandle theta, const int64_t* size, int64_t size_len_, int32_t align_corners, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__is_all_true(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__is_any_true(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_functorch_fallback(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_all_dims(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_all_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_all_dims_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_allclose(AtenTensorHandle self, AtenTensorHandle other, double rtol, double atol, int32_t equal_nan, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_any_dims(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_any_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_any_dims_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_arange(double 
end, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_arange_start(double start, double end, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_arange_start_step(double start, double end, double step, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_arange_out(AtenTensorHandle out, double end); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_arange_start_out(AtenTensorHandle out, double start, double end, double step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_argmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_argmin_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_as_strided(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_as_strided_(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_baddbmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bartlett_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bartlett_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_binary_cross_entropy(AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_binary_cross_entropy_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_binary_cross_entropy_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_binary_cross_entropy_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_binary_cross_entropy_with_logits(AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, AtenTensorHandle* pos_weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bincount(AtenTensorHandle self, AtenTensorHandle* weights, int64_t minlength, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_copysign__Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__lazy_clone(AtenTensorHandle self, 
AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_blackman_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_blackman_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cat_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_block_diag(const AtenTensorHandle* tensors, int64_t tensors_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_complex(AtenTensorHandle real, AtenTensorHandle imag, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_complex_out(AtenTensorHandle out, AtenTensorHandle real, AtenTensorHandle imag); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_polar(AtenTensorHandle abs, AtenTensorHandle angle, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_polar_out(AtenTensorHandle out, AtenTensorHandle abs, AtenTensorHandle angle); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_constant_pad_nd(AtenTensorHandle self, const int64_t* pad, int64_t pad_len_, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t** bias_sizes, int64_t bias_sizes_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution_overrideable(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution_backward_overrideable(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__convolution(AtenTensorHandle input, AtenTensorHandle weight, 
AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, int32_t benchmark, int32_t deterministic, int32_t cudnn_enabled, int32_t allow_tf32, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_conv_tbc(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle bias, int64_t pad, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_copy(AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_copy_(AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_count_nonzero_dim_IntList(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_count_nonzero(AtenTensorHandle self, int64_t* dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cummax(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cummax_out(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cummax_helper(AtenTensorHandle self, AtenTensorHandle values, AtenTensorHandle indices, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cummin(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cummin_out(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cummin_helper(AtenTensorHandle self, AtenTensorHandle values, AtenTensorHandle indices, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cumprod_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cumsum_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__ctc_loss(AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, int64_t blank, int32_t zero_infinity, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__ctc_loss_Tensor(AtenTensorHandle log_probs, AtenTensorHandle targets, AtenTensorHandle input_lengths, AtenTensorHandle target_lengths, int64_t blank, int32_t zero_infinity, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__ctc_loss_backward(AtenTensorHandle grad, AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, AtenTensorHandle neg_log_likelihood, AtenTensorHandle log_alpha, int64_t blank, int32_t zero_infinity, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__ctc_loss_backward_Tensor(AtenTensorHandle grad, AtenTensorHandle log_probs, AtenTensorHandle targets, AtenTensorHandle input_lengths, AtenTensorHandle target_lengths, AtenTensorHandle neg_log_likelihood, AtenTensorHandle log_alpha, int64_t blank, int32_t 
zero_infinity, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diag_embed(AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diagonal(AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diagonal_backward(AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_dot(AtenTensorHandle self, AtenTensorHandle tensor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_dot_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_vdot(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_vdot_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_embedding(AtenTensorHandle weight, AtenTensorHandle indices, int64_t padding_idx, int32_t scale_grad_by_freq, int32_t sparse, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_embedding_dense_backward(AtenTensorHandle grad_output, AtenTensorHandle indices, int64_t num_weights, int64_t padding_idx, int32_t scale_grad_by_freq, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_embedding_renorm_(AtenTensorHandle self, AtenTensorHandle indices, double max_norm, double norm_type, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_forward_only(AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag(AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_dense_backward(AtenTensorHandle grad, AtenTensorHandle indices, AtenTensorHandle offset2bag, AtenTensorHandle bag_size, AtenTensorHandle maximum_indices, int64_t num_weights, int32_t scale_grad_by_freq, int64_t mode, AtenTensorHandle* per_sample_weights, int64_t padding_idx, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_per_sample_weights_backward(AtenTensorHandle grad, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, AtenTensorHandle offset2bag, int64_t mode, int64_t padding_idx, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_empty_memory_format(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_empty_permuted(const int64_t* size, int64_t size_len_, const int64_t* physical_layout, int64_t physical_layout_len_, int32_t* dtype, int32_t* layout, 
int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_empty(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_empty_strided(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_full(AtenTensorHandle self, const int64_t* size, int64_t size_len_, double fill_value, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_zeros(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_ones(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__empty_affine_quantized(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, double scale, int64_t zero_point, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__empty_per_channel_affine_quantized(const int64_t* size, int64_t size_len_, AtenTensorHandle scales, AtenTensorHandle zero_points, int64_t axis, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_empty_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_empty_strided(const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_expand(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t implicit, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_eye(int64_t n, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_eye_m(int64_t n, int64_t m, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_eye_out(AtenTensorHandle out, int64_t n); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_eye_m_out(AtenTensorHandle out, int64_t n, int64_t m); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fill_Scalar(AtenTensorHandle self, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_fill_Tensor(AtenTensorHandle self, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fill__Scalar(AtenTensorHandle self, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fill__Tensor(AtenTensorHandle self, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_floor_divide(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_floor_divide__Tensor(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_floor_divide_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_floor_divide_Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_floor_divide__Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_full(const int64_t* size, int64_t size_len_, double fill_value, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_full_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, double fill_value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_full_like(AtenTensorHandle self, double fill_value, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_from_file(const char* filename, int32_t* shared, int64_t* size, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_2d(AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_2d_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__grid_sampler_2d_cpu_fallback(AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_3d(AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_3d_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hann_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hann_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t 
device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hamming_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hamming_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hamming_window_periodic_alpha(int64_t window_length, int32_t periodic, double alpha, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hamming_window_periodic_alpha_beta(int64_t window_length, int32_t periodic, double alpha, double beta, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kaiser_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kaiser_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kaiser_window_beta(int64_t window_length, int32_t periodic, double beta, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_group_norm(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_group_norm_backward(AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle rstd, AtenTensorHandle* weight, int64_t N, int64_t C, int64_t HxW, int64_t group, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fft_r2c(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t onesided, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fft_r2c_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t onesided); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fft_c2r(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int64_t last_dim_size, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fft_c2r_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int64_t last_dim_size); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fft_c2c(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t forward, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fft_c2c_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t forward); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__validate_compressed_sparse_indices(int32_t is_crow, AtenTensorHandle compressed_idx, AtenTensorHandle plain_idx, int64_t cdim, int64_t dim, int64_t nnz); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__unsafe_index_Tensor(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle source); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_put_(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_put(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__unsafe_index_put(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__index_put_impl_(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, int32_t unsafe, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_isin_Tensor_Tensor_out(AtenTensorHandle out, AtenTensorHandle elements, AtenTensorHandle test_elements, int32_t assume_unique, int32_t invert); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_isin_Tensor_Scalar_out(AtenTensorHandle out, AtenTensorHandle elements, double test_element, int32_t assume_unique, int32_t invert); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_isin_Scalar_Tensor_out(AtenTensorHandle out, double element, AtenTensorHandle test_elements, int32_t assume_unique, int32_t invert); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_is_same_size(AtenTensorHandle self, AtenTensorHandle other, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kthvalue(AtenTensorHandle self, int64_t k, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kthvalue_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t k, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_layer_norm(AtenTensorHandle input, const int64_t* normalized_shape, int64_t normalized_shape_len_, AtenTensorHandle* weight, AtenTensorHandle* bias, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_layer_norm_backward(AtenTensorHandle grad_out, AtenTensorHandle input, const int64_t* normalized_shape, int64_t normalized_shape_len_, AtenTensorHandle mean, AtenTensorHandle rstd, AtenTensorHandle* weight, AtenTensorHandle* bias, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linear_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linspace(double start, double end, int64_t steps, int32_t* 
dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linspace_Tensor_Tensor(AtenTensorHandle start, AtenTensorHandle end, int64_t steps, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linspace_Tensor_Scalar(AtenTensorHandle start, double end, int64_t steps, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linspace_Scalar_Tensor(double start, AtenTensorHandle end, int64_t steps, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linspace_out(AtenTensorHandle out, double start, double end, int64_t steps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linspace_Tensor_Tensor_out(AtenTensorHandle out, AtenTensorHandle start, AtenTensorHandle end, int64_t steps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linspace_Tensor_Scalar_out(AtenTensorHandle out, AtenTensorHandle start, double end, int64_t steps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linspace_Scalar_Tensor_out(AtenTensorHandle out, double start, AtenTensorHandle end, int64_t steps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_xlogy__Scalar_Other(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logspace(double start, double end, int64_t steps, double base, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logspace_Tensor_Tensor(AtenTensorHandle start, AtenTensorHandle end, int64_t steps, double base, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logspace_Tensor_Scalar(AtenTensorHandle start, double end, int64_t steps, double base, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logspace_Scalar_Tensor(double start, AtenTensorHandle end, int64_t steps, double base, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logspace_out(AtenTensorHandle out, double start, double end, int64_t steps, double base); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logspace_Tensor_Tensor_out(AtenTensorHandle out, AtenTensorHandle start, AtenTensorHandle end, int64_t steps, double base); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logspace_Tensor_Scalar_out(AtenTensorHandle out, AtenTensorHandle start, double end, int64_t steps, double base); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logspace_Scalar_Tensor_out(AtenTensorHandle out, double start, AtenTensorHandle end, int64_t steps, double base); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_log_softmax_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__log_softmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t half_to_float); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__log_softmax_backward_data_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, int64_t dim, int32_t input_dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__logcumsumexp(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__logcumsumexp_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logcumsumexp(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logcumsumexp_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logsumexp(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_logsumexp_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__aminmax(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__aminmax_dim(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_aminmax_out(AtenTensorHandle min, AtenTensorHandle max, AtenTensorHandle self, int64_t* dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__compute_linear_combination(AtenTensorHandle input, AtenTensorHandle coefficients, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__compute_linear_combination_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle coefficients); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_dim_max(AtenTensorHandle max, AtenTensorHandle max_values, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_amax_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mean(AtenTensorHandle self, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mean_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_median(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_median_dim(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_median_dim_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nanmedian(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nanmedian_dim(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nanmedian_dim_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_min_dim_min(AtenTensorHandle min, AtenTensorHandle min_indices, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_amin_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t 
dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_convolution(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_rnn_layer(AtenTensorHandle input, AtenTensorHandle weight0, AtenTensorHandle weight1, AtenTensorHandle weight2, AtenTensorHandle weight3, AtenTensorHandle hx_, AtenTensorHandle cx_, int32_t reverse, const int64_t* batch_sizes, int64_t batch_sizes_len_, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t has_biases, int32_t bidirectional, int32_t batch_first, int32_t train, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_rnn_layer_backward(AtenTensorHandle input, AtenTensorHandle weight1, AtenTensorHandle weight2, AtenTensorHandle weight3, AtenTensorHandle weight4, AtenTensorHandle hx_, AtenTensorHandle cx_tmp, AtenTensorHandle output, AtenTensorHandle hy_, AtenTensorHandle cy_, AtenTensorHandle* grad_output, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, int32_t reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t has_biases, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, int32_t batch_first, AtenTensorHandle workspace, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4, AtenTensorHandle* ret5, AtenTensorHandle* ret6); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__convert_weight_to_int4pack(AtenTensorHandle self, int64_t innerKTiles, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_int4pack_mm(AtenTensorHandle self, AtenTensorHandle mat2, int64_t qGroupSize, AtenTensorHandle qScaleAndZeros, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_int8pack_mm(AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle scales, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mode(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mode_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mv(AtenTensorHandle self, AtenTensorHandle vec, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mv_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle vec); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_narrow_copy(AtenTensorHandle self, int64_t dim, int64_t start, int64_t length, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_narrow_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int64_t start, int64_t length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_batch_norm(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu_native_batch_norm_out(AtenTensorHandle out, AtenTensorHandle save_mean, AtenTensorHandle save_invstd, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double momentum, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_batch_norm_legit(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, int32_t training, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_batch_norm_legit_no_training(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_batch_norm_legit_out(AtenTensorHandle out, AtenTensorHandle save_mean, AtenTensorHandle save_invstd, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, int32_t training, double momentum, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_batch_norm_legit_no_stats(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, int32_t training, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_batch_norm_legit_no_stats_out(AtenTensorHandle out, AtenTensorHandle save_mean, AtenTensorHandle save_invstd, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, int32_t training, double momentum, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_batch_norm_backward(AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_invstd, int32_t train, double eps, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_batch_norm_update_stats(AtenTensorHandle input, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nnpack_spatial_convolution(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ones(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ones_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ones_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__euclidean_dist(AtenTensorHandle x1, AtenTensorHandle x2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__cdist_forward(AtenTensorHandle x1, AtenTensorHandle x2, double p, int64_t* compute_mode, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cdist_backward(AtenTensorHandle grad, AtenTensorHandle x1, AtenTensorHandle x2, double p, AtenTensorHandle cdist, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pdist_forward(AtenTensorHandle self, double p, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pdist_backward(AtenTensorHandle grad, AtenTensorHandle self, double p, AtenTensorHandle pdist, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_permute(AtenTensorHandle self, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_pixel_shuffle(AtenTensorHandle self, int64_t upscale_factor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_pixel_unshuffle(AtenTensorHandle self, int64_t downscale_factor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_channel_shuffle(AtenTensorHandle self, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_channel_shuffle(AtenTensorHandle self, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_is_pinned(AtenTensorHandle self, int32_t* device, int32_t device_index_, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rad2deg(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rad2deg_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rad2deg_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scalar_tensor(double s, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rand(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rand_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rand_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint(int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_low(int64_t low, int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_out(AtenTensorHandle out, int64_t high, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_low_out(AtenTensorHandle out, int64_t low, int64_t high, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_like(AtenTensorHandle self, int64_t high, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_randint_like_low_dtype(AtenTensorHandle self, int64_t low, int64_t high, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randn(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randn_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randperm(int64_t n, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randperm_out(AtenTensorHandle out, int64_t n); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_range_step(double start, double end, double step, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_range(double start, double end, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_range_out_(AtenTensorHandle out, double start, double end); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_range_out(AtenTensorHandle out, double start, double end, double step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_repeat(AtenTensorHandle self, const int64_t* repeats, int64_t repeats_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_repeat_interleave_Tensor(AtenTensorHandle repeats, int64_t* output_size, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__reshape_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__reshape_alias(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__prelu_kernel(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__prelu_kernel_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_gelu_out(AtenTensorHandle out, AtenTensorHandle self, const char* approximate); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_gelu_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const char* approximate); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardshrink_out(AtenTensorHandle out, AtenTensorHandle self, double lambd); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardshrink_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_out, AtenTensorHandle self, double lambd); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_select_int(AtenTensorHandle self, int64_t dim, int64_t index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_select_backward(AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu_celu(AtenTensorHandle self, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_celu_(AtenTensorHandle self, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mish_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mish_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_detach(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_detach_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_Tensor(AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_backward(AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t start, int64_t end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_inverse(AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_scatter(AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_select_scatter(AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diagonal_scatter(AtenTensorHandle self, AtenTensorHandle src, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_as_strided_scatter(AtenTensorHandle self, AtenTensorHandle src, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_softmax_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__softmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t half_to_float); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__softmax_backward_data_out(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle output, int64_t dim, int32_t input_dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze_dim(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze_dims(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze__dim(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze__dims(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sspaddmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__chunk_cat(const AtenTensorHandle* tensors, int64_t 
tensors_len_, int64_t dim, int64_t num_chunks, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__chunk_cat_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, int64_t num_chunks); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_stack(const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_stack_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__stack(const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__stack_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sum(AtenTensorHandle self, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sum_IntList_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nansum(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nansum_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_std_correction(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_std_mean_correction(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_std_correction_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_prod(AtenTensorHandle self, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_prod_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_t(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_t_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_threshold_out(AtenTensorHandle out, AtenTensorHandle self, double threshold, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_threshold_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double threshold); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_transpose_int(AtenTensorHandle self, int64_t dim0, int64_t dim1, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_transpose_(AtenTensorHandle self, int64_t dim0, int64_t dim1, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_flip(AtenTensorHandle self, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_roll(AtenTensorHandle self, const int64_t* shifts, int64_t shifts_len_, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_rot90(AtenTensorHandle self, int64_t k, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__transform_bias_rescale_qkv(AtenTensorHandle qkv, AtenTensorHandle qkv_bias, int64_t num_heads, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_tensor_from_mask(AtenTensorHandle t, AtenTensorHandle mask, int32_t mask_check, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_tensor_from_mask_left_aligned(AtenTensorHandle t, AtenTensorHandle mask, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_from_padded(AtenTensorHandle padded, AtenTensorHandle cpu_nested_shape_example, int32_t fuse_transform_0213, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_view_from_buffer(AtenTensorHandle self, AtenTensorHandle nested_size, AtenTensorHandle nested_strides, AtenTensorHandle offsets, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_view_from_buffer_copy(AtenTensorHandle self, AtenTensorHandle nested_size, AtenTensorHandle nested_strides, AtenTensorHandle offsets, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_view_from_jagged_copy(AtenTensorHandle self, AtenTensorHandle offsets, AtenTensorHandle dummy, AtenTensorHandle* lengths, int64_t ragged_idx, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_get_values_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__trilinear(AtenTensorHandle i1, AtenTensorHandle i2, AtenTensorHandle i3, const int64_t* expand1, int64_t expand1_len_, const int64_t* expand2, int64_t expand2_len_, const int64_t* expand3, int64_t expand3_len_, const int64_t* sumdim, int64_t sumdim_len_, int64_t unroll_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__unique(AtenTensorHandle self, int32_t sorted, int32_t return_inverse, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unique_dim(AtenTensorHandle self, int64_t dim, int32_t sorted, int32_t return_inverse, int32_t return_counts, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unique_consecutive(AtenTensorHandle self, int32_t return_inverse, int32_t return_counts, int64_t* dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unique_dim_consecutive(AtenTensorHandle self, int64_t dim, int32_t return_inverse, int32_t return_counts, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__unique2(AtenTensorHandle self, int32_t sorted, int32_t return_inverse, int32_t return_counts, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__unsafe_view(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unsqueeze(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unsqueeze_(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_var_correction(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* 
correction, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_var_correction_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_var_mean_correction(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_where_self_out(AtenTensorHandle out, AtenTensorHandle condition, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_norm_interface(AtenTensorHandle v, AtenTensorHandle g, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_norm_interface_backward(AtenTensorHandle grad_w, AtenTensorHandle saved_v, AtenTensorHandle saved_g, AtenTensorHandle saved_norms, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__efficientzerotensor(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_zeros(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_zeros_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_zeros_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__standard_gamma_grad(AtenTensorHandle self, AtenTensorHandle output, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__dirichlet_grad(AtenTensorHandle x, AtenTensorHandle alpha, AtenTensorHandle total, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_sum_dim(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__spdiags(AtenTensorHandle diagonals, AtenTensorHandle offsets, const int64_t* shape, int64_t shape_len_, int32_t* layout, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_norm_ScalarOpt_dtype(AtenTensorHandle self, double* p, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_norm_Scalar(AtenTensorHandle self, double p, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_norm_dtype_out(AtenTensorHandle out, AtenTensorHandle self, double* p, const int64_t* dim, int64_t dim_len_, int32_t keepdim, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_norm_out(AtenTensorHandle out, AtenTensorHandle self, double* p, const int64_t* dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_as_(AtenTensorHandle self, AtenTensorHandle the_template, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_zero_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rsub_Tensor(AtenTensorHandle self, AtenTensorHandle other, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu__sparse_addmm(AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__addmm_activation_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha, int32_t use_gelu); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_compressed_tensor_comp_plain_value_size(AtenTensorHandle compressed_indices, AtenTensorHandle plain_indices, AtenTensorHandle values, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_compressed_tensor_comp_plain_value(AtenTensorHandle compressed_indices, AtenTensorHandle plain_indices, AtenTensorHandle values, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_coo_tensor_size(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_dim(AtenTensorHandle self, int64_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_dense_dim(AtenTensorHandle self, int64_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_is_coalesced(AtenTensorHandle self, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_values(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_crow_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_col_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ccol_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_row_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_sparse_dim(AtenTensorHandle self, int64_t sparse_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse(AtenTensorHandle self, int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_csr(AtenTensorHandle self, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_csc(AtenTensorHandle self, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_bsr(AtenTensorHandle self, const int64_t* blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_bsc(AtenTensorHandle self, const int64_t* blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_to_mkldnn(AtenTensorHandle self, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_quantize_per_tensor_dynamic(AtenTensorHandle self, int32_t dtype, int32_t reduce_range, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantize_per_tensor(AtenTensorHandle self, double scale, int64_t zero_point, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantize_per_tensor_tensor_qparams(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantize_per_channel(AtenTensorHandle self, AtenTensorHandle scales, AtenTensorHandle zero_points, int64_t axis, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_dequantize_self(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__make_per_tensor_quantized_tensor(AtenTensorHandle self, double scale, int64_t zero_point, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__make_per_channel_quantized_tensor(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fake_quantize_per_tensor_affine_cachemask(AtenTensorHandle self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, AtenTensorHandle fake_quant_enabled, int64_t quant_min, int64_t quant_max, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fake_quantize_learnable_per_tensor_affine(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fake_quantize_learnable_per_tensor_affine_backward(AtenTensorHandle grad, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fake_quantize_per_channel_affine_cachemask(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fake_quantize_learnable_per_channel_affine(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fake_quantize_learnable_per_channel_affine_backward(AtenTensorHandle grad, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_moving_avg_obs_fq_helper(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t 
per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_copy(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t non_blocking, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__local_scalar_dense(AtenTensorHandle self, double* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pack_padded_sequence(AtenTensorHandle input, AtenTensorHandle lengths, int32_t batch_first, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_set__source_Tensor(AtenTensorHandle self, AtenTensorHandle source, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_set_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_lift(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_lift_fresh(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_lift_fresh_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_is_set_to(AtenTensorHandle self, AtenTensorHandle tensor, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_fill__Scalar(AtenTensorHandle self, AtenTensorHandle mask, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_fill__Tensor(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_fill_Tensor(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_scatter_(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_scatter(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_scatter_backward(AtenTensorHandle grad_output, AtenTensorHandle mask, const int64_t* sizes, int64_t sizes_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__masked_softmax(AtenTensorHandle self, AtenTensorHandle mask, int64_t* dim, int64_t* mask_type, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__masked_softmax_backward(AtenTensorHandle grad_output, AtenTensorHandle output, AtenTensorHandle mask, int64_t* dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_dtype(AtenTensorHandle self, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_put_(AtenTensorHandle self, AtenTensorHandle index, AtenTensorHandle source, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_put(AtenTensorHandle self, AtenTensorHandle index, AtenTensorHandle source, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_add_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle source, double alpha); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_index_reduce_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle source, const char* reduce, int32_t include_self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_fill__int_Scalar(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_fill_int_Scalar(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_fill__int_Tensor(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_fill_int_Tensor(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_src_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_value_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_reduce_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src, const char* reduce); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_value_reduce_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value, const char* reduce); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_add_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scatter_reduce_two_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src, const char* reduce, int32_t include_self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu___irshift___Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu___irshift___Tensor(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addbmm_(AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addbmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_addbmm(AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_triu_out(AtenTensorHandle out, AtenTensorHandle self, int64_t diagonal); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_tril_out(AtenTensorHandle out, AtenTensorHandle self, int64_t diagonal); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_tril_indices(int64_t row, int64_t col, int64_t offset, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_triu_indices(int64_t row, int64_t col, int64_t offset, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_trace(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_take_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_take(AtenTensorHandle self, AtenTensorHandle index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_select_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_select(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_select_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_select(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nonzero_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nonzero(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nonzero_static_out(AtenTensorHandle out, AtenTensorHandle self, int64_t size, int64_t fill_value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nonzero_static(AtenTensorHandle self, int64_t size, int64_t fill_value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_gather_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, int32_t sparse_grad); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_triangular_solve_X(AtenTensorHandle X, AtenTensorHandle M, AtenTensorHandle self, AtenTensorHandle A, int32_t upper, int32_t transpose, int32_t unitriangular); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linalg_check_errors(AtenTensorHandle info, const char* api_name, int32_t is_matrix); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_solve_triangular_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle B, int32_t upper, int32_t left, int32_t unitriangular); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_solve_triangular(AtenTensorHandle self, AtenTensorHandle B, int32_t upper, int32_t left, int32_t unitriangular, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cholesky_out(AtenTensorHandle out, AtenTensorHandle self, int32_t upper); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cholesky(AtenTensorHandle self, int32_t upper, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cholesky_solve_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle input2, int32_t upper); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cholesky_solve(AtenTensorHandle self, AtenTensorHandle input2, int32_t upper, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cholesky_solve_helper(AtenTensorHandle self, AtenTensorHandle A, int32_t upper, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cholesky_inverse(AtenTensorHandle self, int32_t upper, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cholesky_inverse_out(AtenTensorHandle out, AtenTensorHandle self, int32_t upper); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_geqrf_a(AtenTensorHandle a, AtenTensorHandle tau, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_geqrf(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_ormqr_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle input2, AtenTensorHandle input3, int32_t left, int32_t transpose); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ormqr(AtenTensorHandle self, AtenTensorHandle input2, AtenTensorHandle input3, int32_t left, int32_t transpose, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_lu_unpack_out(AtenTensorHandle P, AtenTensorHandle L, AtenTensorHandle U, AtenTensorHandle LU_data, AtenTensorHandle LU_pivots, int32_t unpack_data, int32_t unpack_pivots); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_dist(AtenTensorHandle self, AtenTensorHandle other, double p, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_histc_out(AtenTensorHandle out, AtenTensorHandle self, int64_t bins, double min, double max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_histc(AtenTensorHandle self, int64_t bins, double min, double max, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_histogram_bins_tensor_out(AtenTensorHandle hist, AtenTensorHandle bin_edges, AtenTensorHandle self, AtenTensorHandle bins, AtenTensorHandle* weight, int32_t density); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_histogram_bins_tensor(AtenTensorHandle self, AtenTensorHandle bins, AtenTensorHandle* weight, int32_t density, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_histogram_bin_ct_out(AtenTensorHandle hist, AtenTensorHandle bin_edges, AtenTensorHandle self, int64_t bins, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_histogram_bin_ct(AtenTensorHandle self, int64_t bins, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__histogramdd_from_bin_cts(AtenTensorHandle self, const int64_t* bins, int64_t bins_len_, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__histogramdd_from_bin_tensors(AtenTensorHandle self, const AtenTensorHandle* bins, int64_t bins_len_, AtenTensorHandle* weight, int32_t density, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_min(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_min_unary_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_unary_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sort_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim, int32_t descending); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sort_values_stable(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int32_t* stable, int64_t dim, int32_t descending); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sort(AtenTensorHandle self, int64_t dim, int32_t descending, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_argsort_stable(AtenTensorHandle self, int32_t stable, int64_t dim, int32_t descending, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_topk_values(AtenTensorHandle values, 
AtenTensorHandle indices, AtenTensorHandle self, int64_t k, int64_t dim, int32_t largest, int32_t sorted); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_all_all_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_any_all_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_renorm_out(AtenTensorHandle out, AtenTensorHandle self, double p, int64_t dim, double maxnorm); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unfold(AtenTensorHandle self, int64_t dimension, int64_t size, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unfold_backward(AtenTensorHandle grad_in, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t size, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_alias(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__amp_foreach_non_finite_check_and_unscale_(const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle found_inf, AtenTensorHandle inv_scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__amp_update_scale_(AtenTensorHandle self, AtenTensorHandle growth_tracker, AtenTensorHandle found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_add__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_add__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_add__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_add__Tensor(const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sub__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sub__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sub__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_mul__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_mul__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_mul__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_mul__Tensor(const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_div__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_div__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_div__ScalarList(const AtenTensorHandle* self, int64_t 
self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_div__Tensor(const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_max__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_max__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_max__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_min__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_min__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_min__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_maximum__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_maximum__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_maximum__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_minimum__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_minimum__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_minimum__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcdiv__Scalar(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcdiv__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcdiv__Tensor(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, AtenTensorHandle scalars); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcmul__Scalar(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcmul__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcmul__Tensor(const 
AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, AtenTensorHandle scalars); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_abs_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_acos_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_asin_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_atan_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_ceil_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_cos_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_cosh_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_erf_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_erfc_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_exp_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_expm1_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_floor_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_frac_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_lerp__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensors1, int64_t tensors1_len_, const AtenTensorHandle* weights, int64_t weights_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_lerp__Scalar(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensors1, int64_t tensors1_len_, double weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_lgamma_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_log_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_log10_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_log1p_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_log2_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_neg_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_pow__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* exponent, int64_t exponent_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_pow__Scalar(const AtenTensorHandle* self, int64_t self_len_, double exponent); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_pow__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* exponent, int64_t exponent_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_reciprocal_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_round_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__foreach_sigmoid_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sign_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sin_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sinh_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sqrt_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_tan_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_tanh_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_trunc_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_zero_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_copy_(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* src, int64_t src_len_, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bucketize_Tensor(AtenTensorHandle self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bucketize_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bucketize_Scalar(double self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_searchsorted_Tensor(AtenTensorHandle sorted_sequence, AtenTensorHandle self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_searchsorted_Tensor_out(AtenTensorHandle out, AtenTensorHandle sorted_sequence, AtenTensorHandle self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_searchsorted_Scalar(AtenTensorHandle sorted_sequence, double self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_searchsorted_Scalar_out(AtenTensorHandle out, AtenTensorHandle sorted_sequence, double self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__convert_indices_from_coo_to_csr_out(AtenTensorHandle out, AtenTensorHandle self, int64_t size, int32_t out_int32); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__convert_indices_from_csr_to_coo_out(AtenTensorHandle out, AtenTensorHandle crow_indices, AtenTensorHandle col_indices, int32_t out_int32, int32_t transpose); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mse_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mse_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mse_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu_multi_margin_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, double p, double margin, AtenTensorHandle* weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_multi_margin_loss(AtenTensorHandle self, AtenTensorHandle target, double p, double margin, AtenTensorHandle* weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_multi_margin_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, double p, double margin, AtenTensorHandle* weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_multi_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, double p, double margin, AtenTensorHandle* weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_multilabel_margin_loss_forward_output(AtenTensorHandle output, AtenTensorHandle is_target, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_multilabel_margin_loss_forward(AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_multilabel_margin_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle is_target); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_multilabel_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle is_target, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nll_loss_forward_output(AtenTensorHandle output, AtenTensorHandle total_weight, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nll_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index, AtenTensorHandle total_weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nll_loss2d_forward_output(AtenTensorHandle output, AtenTensorHandle total_weight, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nll_loss2d_forward(AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nll_loss2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index, AtenTensorHandle total_weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nll_loss2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index, AtenTensorHandle total_weight, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_smooth_l1_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double beta); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu_smooth_l1_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double beta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_smooth_l1_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double beta, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_huber_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double delta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_huber_loss(AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double delta, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_huber_loss_backward_out(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double delta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_huber_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double delta, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_soft_margin_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_soft_margin_loss(AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_soft_margin_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_soft_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_elu_out(AtenTensorHandle out, AtenTensorHandle self, double alpha, double scale, double input_scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_elu_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, double alpha, double scale, double input_scale, int32_t is_result, AtenTensorHandle self_or_result); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_glu_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_glu_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_glu_backward(AtenTensorHandle grad_output, AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_glu_jvp(AtenTensorHandle glu, AtenTensorHandle x, AtenTensorHandle dx, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_glu_backward_jvp(AtenTensorHandle grad_x, AtenTensorHandle grad_glu, AtenTensorHandle x, AtenTensorHandle dgrad_glu, AtenTensorHandle dx, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardsigmoid_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardsigmoid_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardtanh_out(AtenTensorHandle out, AtenTensorHandle self, double min_val, double max_val); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_hardtanh(AtenTensorHandle self, double min_val, double max_val, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardtanh_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double min_val, double max_val); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardtanh_backward(AtenTensorHandle grad_output, AtenTensorHandle self, double min_val, double max_val, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardtanh_(AtenTensorHandle self, double min_val, double max_val, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardswish_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardswish(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardswish_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardswish_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_leaky_relu_out(AtenTensorHandle out, AtenTensorHandle self, double negative_slope); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_leaky_relu_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double negative_slope, int32_t self_is_result); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_log_sigmoid_forward_output(AtenTensorHandle output, AtenTensorHandle buffer, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_log_sigmoid_forward(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_log_sigmoid_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle buffer); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_log_sigmoid_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle buffer, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rrelu_with_noise_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle noise, double lower, double upper, int32_t training, int32_t self_is_result, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_softplus_out(AtenTensorHandle out, AtenTensorHandle self, double beta, double threshold); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_softplus_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double beta, double threshold); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_softshrink_out(AtenTensorHandle out, AtenTensorHandle self, double lambd); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_softshrink_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double lambd); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_avg_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool2d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_avg_pool3d_out(AtenTensorHandle out, 
AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool3d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_avg_pool3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_max_pool2d_out(AtenTensorHandle out, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_max_pool2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_max_pool3d_out(AtenTensorHandle out, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_adaptive_max_pool3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_avg_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_avg_pool2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_avg_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_avg_pool3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool2d_output(AtenTensorHandle output, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle random_samples); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool3d_output(AtenTensorHandle output, AtenTensorHandle indices, AtenTensorHandle self, const 
int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle random_samples); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fractional_max_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool2d_with_indices_out(AtenTensorHandle out, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool2d_with_indices_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool3d_with_indices_out(AtenTensorHandle out, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool3d_with_indices(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool3d_with_indices_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool3d_with_indices_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_unpool2d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_unpool2d(AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_unpool3d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle indices, 
const int64_t* output_size, int64_t output_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_unpool3d(AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_reflection_pad1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_reflection_pad1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_reflection_pad2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_reflection_pad2d(AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_reflection_pad2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_reflection_pad2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_reflection_pad3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_reflection_pad3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_replication_pad3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_linear1d_out(AtenTensorHandle out, 
AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_linear1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_bilinear2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_bilinear2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_bilinear2d_aa_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_bilinear2d_aa_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_bicubic2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_bicubic2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_bicubic2d_aa_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_bicubic2d_aa_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_trilinear3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_trilinear3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_nearest1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_nearest_exact1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, 
double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_nearest1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_nearest_exact1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_nearest2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_nearest_exact2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_nearest2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_nearest_exact2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_nearest3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_nearest_exact3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_upsample_nearest3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__upsample_nearest_exact3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slow_conv_transpose2d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slow_conv_transpose3d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_slow_conv_transpose3d(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* dilation, int64_t dilation_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__slow_conv2d_forward_output(AtenTensorHandle output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__slow_conv2d_forward(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__slow_conv2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_weight, AtenTensorHandle grad_bias, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__slow_conv2d_backward_output_mask(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slow_conv3d_forward_output(AtenTensorHandle output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slow_conv3d_forward(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slow_conv_dilated2d(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slow_conv_dilated3d(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_col2im_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* dilation, int64_t dilation_len_, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu_col2im(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* dilation, int64_t dilation_len_, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_im2col_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* dilation, int64_t dilation_len_, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_im2col(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* dilation, int64_t dilation_len_, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fft_fftfreq(int64_t n, double d, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fft_fftfreq_out(AtenTensorHandle out, int64_t n, double d); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fft_rfftfreq(int64_t n, double d, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fft_rfftfreq_out(AtenTensorHandle out, int64_t n, double d); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_cholesky_ex_L(AtenTensorHandle L, AtenTensorHandle info, AtenTensorHandle self, int32_t upper, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_cross_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_lu_factor_ex_out(AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle info, AtenTensorHandle A, int32_t pivot, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_lu_out(AtenTensorHandle P, AtenTensorHandle L, AtenTensorHandle U, AtenTensorHandle A, int32_t pivot); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_lu_solve_out(AtenTensorHandle out, AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle B, int32_t left, int32_t adjoint); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linalg_det_result(AtenTensorHandle result, AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle A); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_ldl_factor_ex_out(AtenTensorHandle LD, AtenTensorHandle pivots, AtenTensorHandle info, AtenTensorHandle self, int32_t hermitian, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_ldl_solve_out(AtenTensorHandle out, AtenTensorHandle LD, AtenTensorHandle pivots, AtenTensorHandle B, int32_t hermitian); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_lstsq(AtenTensorHandle self, AtenTensorHandle b, double* rcond, const char** driver, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_lstsq_out(AtenTensorHandle solution, AtenTensorHandle residuals, AtenTensorHandle rank, AtenTensorHandle singular_values, AtenTensorHandle self, AtenTensorHandle b, double* rcond, const char** driver); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_matrix_exp(AtenTensorHandle self, 
AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linalg_slogdet_sign(AtenTensorHandle sign, AtenTensorHandle logabsdet, AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle A); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_eig(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_eig_out(AtenTensorHandle eigenvalues, AtenTensorHandle eigenvectors, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linalg_eigvals(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_eigvals_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linalg_eigh_eigenvalues(AtenTensorHandle eigenvalues, AtenTensorHandle eigenvectors, AtenTensorHandle A, const char* UPLO, int32_t compute_v); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_householder_product(AtenTensorHandle input, AtenTensorHandle tau, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_householder_product_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle tau); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_inv_ex_inverse(AtenTensorHandle inverse, AtenTensorHandle info, AtenTensorHandle A, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_vector_norm_out(AtenTensorHandle out, AtenTensorHandle self, double ord, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linalg_svd_U(AtenTensorHandle U, AtenTensorHandle S, AtenTensorHandle Vh, AtenTensorHandle A, int32_t full_matrices, int32_t compute_uv, const char** driver); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_pinv_atol_rtol_tensor(AtenTensorHandle self, AtenTensorHandle* atol, AtenTensorHandle* rtol, int32_t hermitian, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_pinv_atol_rtol_tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle* atol, AtenTensorHandle* rtol, int32_t hermitian); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__linalg_solve_ex_result(AtenTensorHandle result, AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle info, AtenTensorHandle A, AtenTensorHandle B, int32_t left, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_qr_out(AtenTensorHandle Q, AtenTensorHandle R, AtenTensorHandle A, const char* mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_parallel_materialize(AtenTensorHandle self, int64_t num_parallel, int32_t skip_first, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_optional_intlist(AtenTensorHandle values, const int64_t** addends, int64_t addends_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_optional_filled_intlist(AtenTensorHandle values, const int64_t** addends, int64_t addends_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_optional_floatlist(AtenTensorHandle values, const double** addends, int64_t addends_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_warn_in_autograd(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_autograd_multiple_dispatch_fullcoverage(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__test_autograd_multiple_dispatch_view(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_autograd_multiple_dispatch_view_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_segment_reduce(AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* indices, AtenTensorHandle* offsets, int64_t axis, int32_t unsafe, double* initial, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__segment_reduce_backward(AtenTensorHandle grad, AtenTensorHandle output, AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* offsets, int64_t axis, double* initial, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_tensor_from_tensor_list(const AtenTensorHandle* list, int64_t list_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fw_primal_copy(AtenTensorHandle self, int64_t level, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__make_dual_copy(AtenTensorHandle primal, AtenTensorHandle tangent, int64_t level, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_as_real_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_as_complex_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__conj_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__neg_view_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_as_strided_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_broadcast_to_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diagonal_copy(AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_expand_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t implicit, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_permute_copy(AtenTensorHandle self, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__reshape_alias_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_select_copy_int(AtenTensorHandle self, int64_t dim, int64_t index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_detach_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_copy_Tensor(AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze_copy_dim(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_squeeze_copy_dims(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_t_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_transpose_copy_int(AtenTensorHandle self, int64_t dim0, int64_t dim1, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unsqueeze_copy(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__values_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_values_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_crow_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_col_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ccol_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_row_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unbind_copy_int_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_split_copy_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, int64_t split_size, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_split_with_sizes_copy_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, const int64_t* split_sizes, int64_t split_sizes_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_copy_dtype(AtenTensorHandle self, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unfold_copy(AtenTensorHandle self, int64_t dimension, int64_t size, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_alias_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__transformer_encoder_layer_fwd(AtenTensorHandle src, int64_t embed_dim, int64_t num_heads, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, int32_t use_gelu, int32_t norm_first, double eps, AtenTensorHandle norm_weight_1, AtenTensorHandle norm_bias_1, AtenTensorHandle norm_weight_2, AtenTensorHandle norm_bias_2, AtenTensorHandle ffn_weight_1, AtenTensorHandle ffn_bias_1, AtenTensorHandle ffn_weight_2, AtenTensorHandle ffn_bias_2, AtenTensorHandle* mask, int64_t* mask_type, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_multi_head_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, int64_t embed_dim, int64_t num_head, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, AtenTensorHandle* mask, int32_t need_weights, int32_t average_attn_weights, int64_t* mask_type, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu__fused_sdp_choice(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* attn_mask, double dropout_p, int32_t is_causal, double* scale, int64_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__scaled_dot_product_flash_attention_for_cpu(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, double dropout_p, int32_t is_causal, AtenTensorHandle* attn_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__scaled_dot_product_flash_attention_for_cpu_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle out, AtenTensorHandle logsumexp, double dropout_p, int32_t is_causal, AtenTensorHandle* attn_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foobar(AtenTensorHandle self, int32_t arg1, int32_t arg2, int32_t arg3, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__new_zeros_with_same_feature_meta_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, int64_t self_num_batch_dims); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cudnn_ctc_loss_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, int64_t blank, int32_t deterministic, int32_t zero_infinity); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cudnn_rnn_flatten_weight_out(AtenTensorHandle out, const AtenTensorHandle* weight_arr, int64_t weight_arr_len_, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, int32_t bidirectional); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cudnn_rnn_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle* weight_buf, AtenTensorHandle hx, AtenTensorHandle* cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cudnn_rnn_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, const AtenTensorHandle* out3, int64_t out3_len_, AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle weight_buf, AtenTensorHandle hx, AtenTensorHandle* cx, AtenTensorHandle output, AtenTensorHandle* grad_output, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state, AtenTensorHandle reserve, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cudnn_init_dropout_state_out(AtenTensorHandle out, double dropout, int32_t train, int64_t dropout_seed); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__masked_scale_out(AtenTensorHandle out, 
AtenTensorHandle self, AtenTensorHandle mask, double scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_dropout_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, double p, int32_t* train); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_dropout_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle mask, double scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__conj_physical_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__add_relu_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_add_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_affine_grid_generator_out(AtenTensorHandle out, AtenTensorHandle theta, const int64_t* size, int64_t size_len_, int32_t align_corners); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_functorch_fallback_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bartlett_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bartlett_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantized_batch_norm_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle mean, AtenTensorHandle var, double eps, double output_scale, int64_t output_zero_point); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_binary_cross_entropy_with_logits_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, AtenTensorHandle* pos_weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bincount_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle* weights, int64_t minlength); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_blackman_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_blackman_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_block_diag_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_constant_pad_nd_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* pad, int64_t pad_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t** bias_sizes, int64_t bias_sizes_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t 
output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution_overrideable_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_convolution_backward_overrideable_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__convolution_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, int32_t benchmark, int32_t deterministic, int32_t cudnn_enabled, int32_t allow_tf32); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_conv_tbc_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle bias, int64_t pad); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_copy_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__copy_from_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle dst, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__copy_from_and_resize_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle dst); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_count_nonzero_dim_IntList_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_count_nonzero_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_affine_grid_generator_out(AtenTensorHandle out, AtenTensorHandle theta, int64_t N, int64_t C, int64_t H, int64_t W); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_affine_grid_generator_backward_out(AtenTensorHandle out, AtenTensorHandle grad, int64_t N, int64_t C, int64_t H, int64_t W); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_batch_norm_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double exponential_average_factor, double epsilon); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_batch_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle grad_output, AtenTensorHandle weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_var, double epsilon, AtenTensorHandle reserveSpace); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_convolution_transpose_out(AtenTensorHandle out, 
AtenTensorHandle self, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic, int32_t allow_tf32); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__mps_convolution_transpose_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mps_convolution_transpose_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle grad_output, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_convolution_relu_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_convolution_add_relu_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle z, double* alpha, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_grid_sampler_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle grid); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_cudnn_grid_sampler_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle grid, AtenTensorHandle grad_output); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__ctc_loss_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, int64_t blank, int32_t zero_infinity); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__ctc_loss_Tensor_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle log_probs, AtenTensorHandle targets, AtenTensorHandle input_lengths, AtenTensorHandle target_lengths, int64_t blank, int32_t zero_infinity); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__ctc_loss_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, AtenTensorHandle neg_log_likelihood, AtenTensorHandle log_alpha, int64_t blank, int32_t zero_infinity); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diag_embed_out(AtenTensorHandle out, AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diagonal_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t offset, int64_t dim1, 
int64_t dim2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_div_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_div_Scalar_mode_out(AtenTensorHandle out, AtenTensorHandle self, double other, const char** rounding_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_embedding_out(AtenTensorHandle out, AtenTensorHandle weight, AtenTensorHandle indices, int64_t padding_idx, int32_t scale_grad_by_freq, int32_t sparse); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_embedding_dense_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle indices, int64_t num_weights, int64_t padding_idx, int32_t scale_grad_by_freq); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_embedding_renorm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle indices, double max_norm, double norm_type); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_embedding_renorm(AtenTensorHandle self, AtenTensorHandle indices, double max_norm, double norm_type, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_forward_only_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_dense_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle indices, AtenTensorHandle offset2bag, AtenTensorHandle bag_size, AtenTensorHandle maximum_indices, int64_t num_weights, int32_t scale_grad_by_freq, int64_t mode, AtenTensorHandle* per_sample_weights, int64_t padding_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__embedding_bag_per_sample_weights_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, AtenTensorHandle offset2bag, int64_t mode, int64_t padding_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_empty_permuted_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, const int64_t* physical_layout, int64_t physical_layout_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_empty_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_empty_strided_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_full_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, double fill_value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_zeros_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_new_ones_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__empty_affine_quantized_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, double scale, int64_t zero_point, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__empty_per_channel_affine_quantized_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, AtenTensorHandle scales, AtenTensorHandle zero_points, int64_t axis, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__resize_output_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t device, int32_t device_index_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__resize_output(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t device, int32_t device_index_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_empty_quantized_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, AtenTensorHandle qtensor, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_empty_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_empty_strided_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fill_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fill_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_floor_divide_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_full_like_out(AtenTensorHandle out, AtenTensorHandle self, double fill_value, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_from_file_out(AtenTensorHandle out, const char* filename, int32_t* shared, int64_t* size); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_2d_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_2d_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__grid_sampler_2d_cpu_fallback_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_3d_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_grid_sampler_3d_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t 
align_corners, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hann_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hann_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hamming_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hamming_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hamming_window_periodic_alpha_out(AtenTensorHandle out, int64_t window_length, int32_t periodic, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hamming_window_periodic_alpha_beta_out(AtenTensorHandle out, int64_t window_length, int32_t periodic, double alpha, double beta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kaiser_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kaiser_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_kaiser_window_beta_out(AtenTensorHandle out, int64_t window_length, int32_t periodic, double beta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_group_norm_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_group_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle rstd, AtenTensorHandle* weight, int64_t N, int64_t C, int64_t HxW, int64_t group, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_put_out(AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__index_put_impl_out(AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, int32_t unsafe); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__index_put_impl(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, int32_t unsafe, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_isnan_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_layer_norm_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, const int64_t* normalized_shape, int64_t normalized_shape_len_, AtenTensorHandle* weight, AtenTensorHandle* bias, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_layer_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_out, AtenTensorHandle input, const int64_t* normalized_shape, int64_t normalized_shape_len_, AtenTensorHandle mean, AtenTensorHandle rstd, AtenTensorHandle* weight, AtenTensorHandle* bias, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linear_backward_out(AtenTensorHandle out0, 
AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, AtenTensorHandle grad_output, AtenTensorHandle weight, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_linear_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_linear_backward_input_out(AtenTensorHandle out, const int64_t* input_size, int64_t input_size_len_, AtenTensorHandle grad_output, AtenTensorHandle weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_linear_backward_weights_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, int32_t bias_defined); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_linear_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, AtenTensorHandle grad_output, AtenTensorHandle weight, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_matmul_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad, AtenTensorHandle self, AtenTensorHandle other, const int32_t* mask, int64_t mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__aminmax_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__aminmax_dim_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_max_pool2d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_max_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_max_pool2d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, AtenTensorHandle input, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_max_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_max_pool3d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, AtenTensorHandle input, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantized_max_pool1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t 
kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantized_max_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantized_max_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_median_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_nanmedian_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__mps_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mps_convolution_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, AtenTensorHandle grad_output, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_rnn_layer_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle input, AtenTensorHandle weight0, AtenTensorHandle weight1, AtenTensorHandle weight2, AtenTensorHandle weight3, AtenTensorHandle hx_, AtenTensorHandle cx_, int32_t reverse, const int64_t* batch_sizes, int64_t batch_sizes_len_, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t has_biases, int32_t bidirectional, int32_t batch_first, int32_t train); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_rnn_layer_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle out5, AtenTensorHandle out6, AtenTensorHandle input, AtenTensorHandle weight1, AtenTensorHandle weight2, AtenTensorHandle weight3, AtenTensorHandle weight4, AtenTensorHandle hx_, AtenTensorHandle cx_tmp, AtenTensorHandle output, AtenTensorHandle hy_, AtenTensorHandle cy_, AtenTensorHandle* grad_output, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, int32_t reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t has_biases, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, int32_t batch_first, AtenTensorHandle workspace); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_miopen_batch_norm_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double exponential_average_factor, double epsilon); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_miopen_batch_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle grad_output, AtenTensorHandle weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_var, double epsilon); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_miopen_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_miopen_convolution_transpose_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_miopen_depthwise_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_miopen_rnn_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle hx, AtenTensorHandle* cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_miopen_rnn_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, const AtenTensorHandle* out3, int64_t out3_len_, AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle weight_buf, AtenTensorHandle hx, AtenTensorHandle* cx, AtenTensorHandle output, AtenTensorHandle* grad_output, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state, AtenTensorHandle reserve, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_sparse_matmul_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mul_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_batch_norm_legit_functional(AtenTensorHandle input, AtenTensorHandle* 
weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, int32_t training, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_batch_norm_legit_no_training_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, double momentum, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_batch_norm_stats_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_batch_norm_gather_stats_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum, double eps, int64_t count); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_batch_norm_gather_stats_with_counts_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum, double eps, AtenTensorHandle counts); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_batch_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_invstd, int32_t train, double eps, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_batch_norm_backward_reduce_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* weight, int32_t input_g, int32_t weight_g, int32_t bias_g); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_batch_norm_backward_elemt_out(AtenTensorHandle out, AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* weight, AtenTensorHandle sum_dy, AtenTensorHandle sum_dy_xmu, AtenTensorHandle count); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_batch_norm_update_stats_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nnpack_spatial_convolution_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ones_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__euclidean_dist_out(AtenTensorHandle out, AtenTensorHandle x1, AtenTensorHandle x2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cdist_forward_out(AtenTensorHandle out, AtenTensorHandle x1, AtenTensorHandle x2, double p, int64_t* compute_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cdist_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle x1, AtenTensorHandle x2, double p, 
AtenTensorHandle cdist); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pdist_forward_out(AtenTensorHandle out, AtenTensorHandle self, double p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pdist_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle self, double p, AtenTensorHandle pdist); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_pixel_shuffle_out(AtenTensorHandle out, AtenTensorHandle self, int64_t upscale_factor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_pixel_unshuffle_out(AtenTensorHandle out, AtenTensorHandle self, int64_t downscale_factor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_channel_shuffle_out(AtenTensorHandle out, AtenTensorHandle self, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pin_memory_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* device, int32_t device_index_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_scalar_tensor_out(AtenTensorHandle out, double s); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rand_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_like_out(AtenTensorHandle out, AtenTensorHandle self, int64_t high, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randint_like_low_dtype_out(AtenTensorHandle out, AtenTensorHandle self, int64_t low, int64_t high, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_randn_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_repeat_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* repeats, int64_t repeats_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_repeat_interleave_Tensor_out(AtenTensorHandle out, AtenTensorHandle repeats, int64_t* output_size); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__mkldnn_reshape_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* shape, int64_t shape_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_relu_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_select_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_celu_out(AtenTensorHandle out, AtenTensorHandle self, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t start, int64_t end, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t* start, int64_t* end, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_select_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diagonal_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int64_t offset, int64_t dim1, int64_t dim2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_as_strided_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unsafe_split_Tensor_out(const 
AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, int64_t split_size, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unsafe_split_with_sizes_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, const int64_t* split_sizes, int64_t split_sizes_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sum_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_std_mean_correction_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_prod_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__mkldnn_transpose_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim0, int64_t dim1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_flip_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dims, int64_t dims_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_roll_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* shifts, int64_t shifts_len_, const int64_t* dims, int64_t dims_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rot90_out(AtenTensorHandle out, AtenTensorHandle self, int64_t k, const int64_t* dims, int64_t dims_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__transform_bias_rescale_qkv_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle qkv, AtenTensorHandle qkv_bias, int64_t num_heads); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_tensor_from_mask_out(AtenTensorHandle out, AtenTensorHandle t, AtenTensorHandle mask, int32_t mask_check); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_from_padded_out(AtenTensorHandle out, AtenTensorHandle padded, AtenTensorHandle cpu_nested_shape_example, int32_t fuse_transform_0213); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_tensor_size_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_tensor_strides_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_tensor_storage_offsets_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_from_padded_and_nested_example_out(AtenTensorHandle out, AtenTensorHandle padded, AtenTensorHandle nt_example); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_view_from_buffer_copy_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle nested_size, AtenTensorHandle nested_strides, AtenTensorHandle offsets); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_view_from_jagged_copy_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle offsets, AtenTensorHandle dummy, AtenTensorHandle* lengths, int64_t ragged_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_get_values_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__trilinear_out(AtenTensorHandle out, AtenTensorHandle i1, AtenTensorHandle i2, AtenTensorHandle i3, const int64_t* expand1, int64_t expand1_len_, const int64_t* expand2, int64_t expand2_len_, const int64_t* expand3, int64_t expand3_len_, const int64_t* sumdim, int64_t sumdim_len_, int64_t unroll_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__unique_out(AtenTensorHandle out0, AtenTensorHandle out1, 
AtenTensorHandle self, int32_t sorted, int32_t return_inverse); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unique_dim_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, int64_t dim, int32_t sorted, int32_t return_inverse, int32_t return_counts); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unique_consecutive_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, int32_t return_inverse, int32_t return_counts, int64_t* dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unique_dim_consecutive_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, int64_t dim, int32_t return_inverse, int32_t return_counts); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__unique2_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, int32_t sorted, int32_t return_inverse, int32_t return_counts); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__unsafe_view_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_var_mean_correction_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_norm_interface_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle v, AtenTensorHandle g, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_norm_interface_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad_w, AtenTensorHandle saved_v, AtenTensorHandle saved_g, AtenTensorHandle saved_norms, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__efficientzerotensor_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_zeros_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__standard_gamma_grad_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle output); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__dirichlet_grad_out(AtenTensorHandle out, AtenTensorHandle x, AtenTensorHandle alpha, AtenTensorHandle total); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_norm_out(AtenTensorHandle out, AtenTensorHandle self, double p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_native_norm_ScalarOpt_dim_dtype_out(AtenTensorHandle out, AtenTensorHandle self, double* p, const int64_t* dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_sum_dim_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_sum_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_csr_sum_dim_dtype_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_csr_prod_dim_dtype_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_softmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t 
half_to_float); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_softmax_backward_data_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, int64_t dim, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_log_softmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t half_to_float); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_log_softmax_backward_data_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, int64_t dim, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__spdiags_out(AtenTensorHandle out, AtenTensorHandle diagonals, AtenTensorHandle offsets, const int64_t* shape, int64_t shape_len_, int32_t* layout); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_norm_ScalarOpt_dtype_out(AtenTensorHandle out, AtenTensorHandle self, double* p, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_norm_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_clone_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_as_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle the_template, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_as(AtenTensorHandle self, AtenTensorHandle the_template, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_as_sparse_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle the_template); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_resize_as_sparse(AtenTensorHandle self, AtenTensorHandle the_template, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_zero_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_zero(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sub_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rsub_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rsub_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_addmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_coo_tensor_size_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_coo_tensor_with_dims_out(AtenTensorHandle out, int64_t sparse_dim, int64_t dense_dim, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_coo_tensor_with_dims_and_tensors_out(AtenTensorHandle out, int64_t sparse_dim, int64_t dense_dim, const int64_t* size, int64_t size_len_, AtenTensorHandle indices, AtenTensorHandle values, int32_t* is_coalesced); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_resize_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int64_t sparse_dim, int64_t dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_resize(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int64_t sparse_dim, int64_t dense_dim, 
AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_resize_and_clear_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int64_t sparse_dim, int64_t dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_resize_and_clear(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int64_t sparse_dim, int64_t dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_sparse_mask_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_mask_projection_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, int32_t accumulate_matches); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_dense_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* dtype, int32_t* masked_grad); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__coalesce_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__coalesced_out(AtenTensorHandle out, AtenTensorHandle self, int32_t coalesced); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__coalesced(AtenTensorHandle self, int32_t coalesced, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_copy_sparse_to_sparse_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_copy_sparse_to_sparse(AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_sparse_dim_out(AtenTensorHandle out, AtenTensorHandle self, int64_t sparse_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_csr_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_csc_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_bsr_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* blocksize, int64_t blocksize_len_, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_sparse_bsc_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* blocksize, int64_t blocksize_len_, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_to_mkldnn_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_reorder_conv2d_weight_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const int64_t** input_size, int64_t input_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_reorder_conv3d_weight_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantize_per_tensor_dynamic_out(AtenTensorHandle out, AtenTensorHandle self, int32_t dtype, int32_t reduce_range); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantize_per_tensor_out(AtenTensorHandle out, 
AtenTensorHandle self, double scale, int64_t zero_point, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantize_per_tensor_tensor_qparams_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantize_per_tensor_tensors_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* tensors, int64_t tensors_len_, AtenTensorHandle scales, AtenTensorHandle zero_points, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_quantize_per_channel_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scales, AtenTensorHandle zero_points, int64_t axis, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_dequantize_self_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_dequantize_tensors_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* tensors, int64_t tensors_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_q_per_channel_scales_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_q_per_channel_zero_points_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_int_repr_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__make_per_tensor_quantized_tensor_out(AtenTensorHandle out, AtenTensorHandle self, double scale, int64_t zero_point); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__make_per_channel_quantized_tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fake_quantize_per_tensor_affine_cachemask_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, AtenTensorHandle fake_quant_enabled, int64_t quant_min, int64_t quant_max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fake_quantize_learnable_per_tensor_affine_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_fake_quantize_per_channel_affine_cachemask_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fake_quantize_learnable_per_channel_affine_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_moving_avg_obs_fq_helper_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant); 
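+// Illustrative note (hypothetical sketch, not part of the generated declarations):
+// the recurring patterns in this shim are (a) `_out` variants that write into a
+// caller-provided AtenTensorHandle, (b) functional variants that return new handles
+// through trailing `AtenTensorHandle* retN` parameters, (c) optional scalars/tensors
+// passed as nullable pointers, and (d) int lists passed as a pointer plus a `_len_`
+// count. Assuming `out` and `self` are valid AtenTensorHandles obtained elsewhere,
+// and assuming a zero AOTITorchError denotes success, a call might look like:
+//
+//   AOTITorchError err = aoti_torch_cpu_relu_out(out, self);
+//   if (err != 0) {
+//     /* propagate or handle the error code */
+//   }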
+AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_moving_avg_obs_fq_helper_functional(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4, AtenTensorHandle* ret5); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__to_copy_out(AtenTensorHandle out, AtenTensorHandle self, int32_t non_blocking, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__lstm_mps_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle out5, AtenTensorHandle input, const AtenTensorHandle* hx, int64_t hx_len_, const AtenTensorHandle* params, int64_t params_len_, int32_t has_biases, int64_t num_layers, double dropout, int32_t train, int32_t bidirectional, int32_t batch_first); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_lstm_mps_backward_out(AtenTensorHandle out0, const AtenTensorHandle* out1, int64_t out1_len_, const AtenTensorHandle* out2, int64_t out2_len_, AtenTensorHandle* grad_y, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, AtenTensorHandle z_state, AtenTensorHandle cell_state_fwd, AtenTensorHandle input, AtenTensorHandle layersOutputs, const AtenTensorHandle* hx, int64_t hx_len_, const AtenTensorHandle* params, int64_t params_len_, int32_t has_biases, int64_t num_layers, double dropout, int32_t train, int32_t bidirectional, int32_t batch_first); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__thnn_fused_lstm_cell_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input_gates, AtenTensorHandle hidden_gates, AtenTensorHandle cx, AtenTensorHandle* input_bias, AtenTensorHandle* hidden_bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__thnn_fused_lstm_cell_backward_impl_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, AtenTensorHandle cx, AtenTensorHandle cy, AtenTensorHandle workspace, int32_t has_bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__thnn_fused_gru_cell_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input_gates, AtenTensorHandle hidden_gates, AtenTensorHandle hx, AtenTensorHandle* input_bias, AtenTensorHandle* hidden_bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__thnn_fused_gru_cell_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle grad_hy, AtenTensorHandle workspace, int32_t has_bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__pack_padded_sequence_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, AtenTensorHandle lengths, int32_t batch_first); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_set_source_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle source); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_set_source_Tensor(AtenTensorHandle self, AtenTensorHandle source, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_set_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_set(AtenTensorHandle self, 
AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_lift_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_lift_fresh_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_fill_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_fill_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_masked_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__masked_softmax_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, int64_t* dim, int64_t* mask_type); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__masked_softmax_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, AtenTensorHandle mask, int64_t* dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_put_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle index, AtenTensorHandle source, int32_t accumulate); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_fill_int_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_index_fill_int_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bitwise_and_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bitwise_or_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bitwise_xor_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu___lshift___Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu___lshift___Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bitwise_left_shift_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu___rshift___Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu___rshift___Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bitwise_right_shift_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_tril_indices_out(AtenTensorHandle out, int64_t row, int64_t col, int64_t offset); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_triu_indices_out(AtenTensorHandle out, int64_t row, int64_t col, int64_t offset); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_trace_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__cholesky_solve_helper_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle A, int32_t upper); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_dist_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, double p); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cpu__histogramdd_bin_edges_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, const int64_t* bins, int64_t bins_len_, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__histogramdd_from_bin_cts_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* bins, int64_t bins_len_, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__histogramdd_from_bin_tensors_out(AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle* bins, int64_t bins_len_, AtenTensorHandle* weight, int32_t density); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_remainder_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_argsort_stable_out(AtenTensorHandle out, AtenTensorHandle self, int32_t stable, int64_t dim, int32_t descending); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unfold_backward_out(AtenTensorHandle out, AtenTensorHandle grad_in, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t size, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__amp_foreach_non_finite_check_and_unscale_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle found_inf, AtenTensorHandle inv_scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__amp_update_scale_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle growth_tracker, AtenTensorHandle found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__amp_update_scale(AtenTensorHandle self, AtenTensorHandle growth_tracker, AtenTensorHandle found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_add_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_add_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_add_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_add_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sub_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sub_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sub_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__foreach_mul_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_mul_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_mul_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_mul_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_div_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_div_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_div_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_div_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_max_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_max_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_max_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_min_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_min_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_clamp_min_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_maximum_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_maximum_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_maximum_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__foreach_minimum_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_minimum_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_minimum_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcdiv_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcdiv_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcdiv_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, AtenTensorHandle scalars); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcmul_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcmul_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_addcmul_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, AtenTensorHandle scalars); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_abs_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_acos_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_asin_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_atan_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_ceil_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_cos_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_cosh_out(const 
AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_erf_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_erfc_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_exp_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_expm1_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_floor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_frac_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_lerp_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensors1, int64_t tensors1_len_, const AtenTensorHandle* weights, int64_t weights_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_lerp_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensors1, int64_t tensors1_len_, double weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_lgamma_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_log_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_log10_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_log1p_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_log2_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_neg_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_norm_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double ord); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_pow_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* exponent, int64_t exponent_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_pow_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double exponent); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_pow_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* exponent, int64_t exponent_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_reciprocal_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, 
int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_round_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sigmoid_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sign_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sin_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sinh_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_sqrt_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_tan_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_tanh_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_trunc_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_zero_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foreach_copy_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* src, int64_t src_len_, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_bucketize_Scalar_out(AtenTensorHandle out, double self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_glu_jvp_out(AtenTensorHandle out, AtenTensorHandle glu, AtenTensorHandle x, AtenTensorHandle dx, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_glu_backward_jvp_out(AtenTensorHandle out, AtenTensorHandle grad_x, AtenTensorHandle grad_glu, AtenTensorHandle x, AtenTensorHandle dgrad_glu, AtenTensorHandle dx, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_hardswish_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_rrelu_with_noise_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle noise, double lower, double upper, int32_t training, int32_t self_is_result); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_mkldnn_adaptive_avg_pool2d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool2d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__adaptive_avg_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu__adaptive_avg_pool3d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__slow_conv2d_backward_output_mask_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_conv_depthwise3d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slow_conv_dilated2d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slow_conv_dilated3d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_isinf_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_linalg_matrix_exp_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_optional_intlist_out(AtenTensorHandle out, AtenTensorHandle values, const int64_t** addends, int64_t addends_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_optional_filled_intlist_out(AtenTensorHandle out, AtenTensorHandle values, const int64_t** addends, int64_t addends_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_optional_floatlist_out(AtenTensorHandle out, AtenTensorHandle values, const double** addends, int64_t addends_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_warn_in_autograd_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_autograd_multiple_dispatch_fullcoverage_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__test_autograd_multiple_dispatch_view_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_segment_reduce_out(AtenTensorHandle out, AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* indices, AtenTensorHandle* offsets, int64_t axis, int32_t unsafe, double* initial); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__segment_reduce_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle output, AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* offsets, int64_t axis, double* initial); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__nested_tensor_from_tensor_list_out(AtenTensorHandle out, const AtenTensorHandle* list, int64_t list_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, 
int32_t* pin_memory); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fw_primal_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t level); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__make_dual_copy_out(AtenTensorHandle out, AtenTensorHandle primal, AtenTensorHandle tangent, int64_t level); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_as_real_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_as_complex_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__conj_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__neg_view_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_as_strided_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__sparse_broadcast_to_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_diagonal_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_expand_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t implicit); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_permute_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dims, int64_t dims_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__reshape_alias_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_select_copy_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int64_t index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_detach_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_slice_copy_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze_copy_dim_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_squeeze_copy_dims_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_t_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_transpose_copy_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim0, int64_t dim1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unsqueeze_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__values_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_values_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cpu_crow_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_col_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_ccol_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_row_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_view_copy_dtype_out(AtenTensorHandle out, AtenTensorHandle self, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_unfold_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dimension, int64_t size, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_alias_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu_to_padded_tensor_out(AtenTensorHandle out, AtenTensorHandle self, double padding, const int64_t** output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__transformer_encoder_layer_fwd_out(AtenTensorHandle out, AtenTensorHandle src, int64_t embed_dim, int64_t num_heads, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, int32_t use_gelu, int32_t norm_first, double eps, AtenTensorHandle norm_weight_1, AtenTensorHandle norm_bias_1, AtenTensorHandle norm_weight_2, AtenTensorHandle norm_bias_2, AtenTensorHandle ffn_weight_1, AtenTensorHandle ffn_bias_1, AtenTensorHandle ffn_weight_2, AtenTensorHandle ffn_bias_2, AtenTensorHandle* mask, int64_t* mask_type); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__native_multi_head_attention_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, int64_t embed_dim, int64_t num_head, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, AtenTensorHandle* mask, int32_t need_weights, int32_t average_attn_weights, int64_t* mask_type); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__triton_scaled_dot_attention_out(AtenTensorHandle out, AtenTensorHandle q, AtenTensorHandle k, AtenTensorHandle v, double dropout_p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__triton_multi_head_attention_out(AtenTensorHandle out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, int64_t embed_dim, int64_t num_head, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, AtenTensorHandle* mask); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__foobar_out(AtenTensorHandle out, AtenTensorHandle self, int32_t arg1, int32_t arg2, int32_t arg3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_adam_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, double lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); 
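// --- Editorial note (illustrative sketch only, not part of the generated header) ---
// For list-taking ops such as the _foreach_* and _fused_* entries above, every tensor
// list is flattened into a (const AtenTensorHandle*, int64_t length) pair, and optional
// tensors are passed as nullable AtenTensorHandle* arguments. Under that reading, a
// hypothetical call to aoti_torch_cpu__fused_adam_out over n parameter tensors could
// look like the sketch below; passing NULL for grad_scale and found_inf is assumed to
// mean "optional tensor absent", and the handle arrays (outs, params, grads, ...) are
// placeholders the caller would have prepared elsewhere:
//
//   AOTITorchError err = aoti_torch_cpu__fused_adam_out(
//       outs, n, params, n, grads, n, exp_avgs, n, exp_avg_sqs, n,
//       max_exp_avg_sqs, n, state_steps, n,
//       /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999,
//       /*weight_decay=*/0.0, /*eps=*/1e-8,
//       /*amsgrad=*/1, /*maximize=*/0,
//       /*grad_scale=*/NULL, /*found_inf=*/NULL);
// --- end editorial note ---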
+AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_adam_tensor_lr_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, AtenTensorHandle lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_adamw_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, double lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_adamw_tensor_lr_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, AtenTensorHandle lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_sgd_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* momentum_buffer_list, int64_t momentum_buffer_list_len_, double weight_decay, double momentum, double lr, double dampening, int32_t nesterov, int32_t maximize, int32_t is_first_step, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__fused_sgd_tensor_lr_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* momentum_buffer_list, int64_t momentum_buffer_list_len_, double weight_decay, double momentum, AtenTensorHandle lr, double dampening, int32_t nesterov, int32_t maximize, int32_t is_first_step, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); + +#ifdef __cplusplus +} // extern "C" +#endif + diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/generated/c_shim_cuda.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/generated/c_shim_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..f0198503589eb1b198d91b4e821b6098d0ba3afc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/generated/c_shim_cuda.h @@ -0,0 +1,1316 @@ + +#pragma once + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda__fw_primal(AtenTensorHandle self, int64_t level, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__make_dual(AtenTensorHandle primal, AtenTensorHandle tangent, int64_t level, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__new_zeros_with_same_feature_meta(AtenTensorHandle self, AtenTensorHandle other, int64_t self_num_batch_dims, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__has_same_storage_numel(AtenTensorHandle self, AtenTensorHandle other, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__assert_async(AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__assert_async_msg(AtenTensorHandle self, const char* assert_msg); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__assert_scalar(double self, const char* assert_msg); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__functional_assert_scalar(double self, const char* assert_msg, AtenTensorHandle dep_token, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__print(const char* s); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sym_constrain_range(double size, int64_t* min, int64_t* max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sym_constrain_range_for_size(double size, int64_t* min, int64_t* max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__functional_sym_constrain_range(double size, int64_t* min, int64_t* max, AtenTensorHandle dep_token, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__functional_sym_constrain_range_for_size(double size, int64_t* min, int64_t* max, AtenTensorHandle dep_token, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__use_cudnn_ctc_loss(AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, int64_t blank, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__use_cudnn_ctc_loss_Tensor(AtenTensorHandle log_probs, AtenTensorHandle targets, AtenTensorHandle input_lengths, AtenTensorHandle target_lengths, int64_t blank, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_ctc_loss(AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, int64_t blank, int32_t deterministic, int32_t zero_infinity, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_ctc_loss_Tensor(AtenTensorHandle log_probs, AtenTensorHandle targets, AtenTensorHandle input_lengths, AtenTensorHandle target_lengths, int64_t blank, int32_t deterministic, int32_t zero_infinity, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_rnn_flatten_weight(const AtenTensorHandle* weight_arr, int64_t weight_arr_len_, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, int32_t bidirectional, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_rnn(AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle* weight_buf, AtenTensorHandle hx, AtenTensorHandle* cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t 
bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_init_dropout_state(double dropout, int32_t train, int64_t dropout_seed, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__masked_scale(AtenTensorHandle self, AtenTensorHandle mask, double scale, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_dropout(AtenTensorHandle input, double p, int32_t* train, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_abs_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_as_real(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_as_complex(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__conj(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__conj_physical(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__neg_view(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addmv_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat, AtenTensorHandle vec, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addr(AtenTensorHandle self, AtenTensorHandle vec1, AtenTensorHandle vec2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addr_(AtenTensorHandle self, AtenTensorHandle vec1, AtenTensorHandle vec2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addr_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle vec1, AtenTensorHandle vec2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_affine_grid_generator(AtenTensorHandle theta, const int64_t* size, int64_t size_len_, int32_t align_corners, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__is_all_true(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__is_any_true(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_all_dims(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_all_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_all_dims_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_allclose(AtenTensorHandle self, AtenTensorHandle other, double rtol, double atol, int32_t equal_nan, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_any_dims(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_any_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_any_dims_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_arange(double end, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_arange_start(double start, double end, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_arange_start_step(double start, double end, double step, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_arange_out(AtenTensorHandle out, double end); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_arange_start_out(AtenTensorHandle out, double start, double end, double step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_argmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_argmin_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_as_strided(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_as_strided_(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_baddbmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bartlett_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bartlett_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_binary_cross_entropy(AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_binary_cross_entropy_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_binary_cross_entropy_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_binary_cross_entropy_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_binary_cross_entropy_with_logits(AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, AtenTensorHandle* pos_weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bincount(AtenTensorHandle self, AtenTensorHandle* weights, int64_t minlength, AtenTensorHandle* ret0); 
+AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_copysign__Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__lazy_clone(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_blackman_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_blackman_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cat_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_block_diag(const AtenTensorHandle* tensors, int64_t tensors_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_complex(AtenTensorHandle real, AtenTensorHandle imag, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_complex_out(AtenTensorHandle out, AtenTensorHandle real, AtenTensorHandle imag); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_polar(AtenTensorHandle abs, AtenTensorHandle angle, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_polar_out(AtenTensorHandle out, AtenTensorHandle abs, AtenTensorHandle angle); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_constant_pad_nd(AtenTensorHandle self, const int64_t* pad, int64_t pad_len_, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t** bias_sizes, int64_t bias_sizes_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution_overrideable(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution_backward_overrideable(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const 
int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__convolution(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, int32_t benchmark, int32_t deterministic, int32_t cudnn_enabled, int32_t allow_tf32, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_conv_tbc(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle bias, int64_t pad, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_copy(AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_copy_(AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_count_nonzero_dim_IntList(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_count_nonzero(AtenTensorHandle self, int64_t* dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_affine_grid_generator(AtenTensorHandle theta, int64_t N, int64_t C, int64_t H, int64_t W, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_affine_grid_generator_backward(AtenTensorHandle grad, int64_t N, int64_t C, int64_t H, int64_t W, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_batch_norm(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double exponential_average_factor, double epsilon, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_batch_norm_backward(AtenTensorHandle input, AtenTensorHandle grad_output, AtenTensorHandle weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_var, double epsilon, AtenTensorHandle reserveSpace, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_convolution(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic, int32_t allow_tf32, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic, int32_t allow_tf32); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_convolution_transpose(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t 
deterministic, int32_t allow_tf32, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_convolution_relu(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_convolution_add_relu(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle z, double* alpha, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_grid_sampler(AtenTensorHandle self, AtenTensorHandle grid, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_grid_sampler_backward(AtenTensorHandle self, AtenTensorHandle grid, AtenTensorHandle grad_output, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cummax(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cummax_out(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cummax_helper(AtenTensorHandle self, AtenTensorHandle values, AtenTensorHandle indices, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cummin(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cummin_out(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cummin_helper(AtenTensorHandle self, AtenTensorHandle values, AtenTensorHandle indices, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cumprod_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cumsum_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__ctc_loss(AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, int64_t blank, int32_t zero_infinity, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__ctc_loss_Tensor(AtenTensorHandle log_probs, AtenTensorHandle targets, AtenTensorHandle input_lengths, AtenTensorHandle target_lengths, int64_t blank, int32_t zero_infinity, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__ctc_loss_backward(AtenTensorHandle grad, AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, AtenTensorHandle neg_log_likelihood, AtenTensorHandle log_alpha, int64_t blank, int32_t zero_infinity, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__ctc_loss_backward_Tensor(AtenTensorHandle grad, AtenTensorHandle log_probs, AtenTensorHandle targets, AtenTensorHandle input_lengths, AtenTensorHandle target_lengths, AtenTensorHandle neg_log_likelihood, AtenTensorHandle log_alpha, int64_t blank, int32_t 
zero_infinity, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diag_embed(AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diagonal(AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diagonal_backward(AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_dot(AtenTensorHandle self, AtenTensorHandle tensor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_dot_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle tensor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_vdot(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_vdot_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_embedding(AtenTensorHandle weight, AtenTensorHandle indices, int64_t padding_idx, int32_t scale_grad_by_freq, int32_t sparse, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_embedding_dense_backward(AtenTensorHandle grad_output, AtenTensorHandle indices, int64_t num_weights, int64_t padding_idx, int32_t scale_grad_by_freq, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_embedding_renorm_(AtenTensorHandle self, AtenTensorHandle indices, double max_norm, double norm_type, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_forward_only(AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag(AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_dense_backward(AtenTensorHandle grad, AtenTensorHandle indices, AtenTensorHandle offset2bag, AtenTensorHandle bag_size, AtenTensorHandle maximum_indices, int64_t num_weights, int32_t scale_grad_by_freq, int64_t mode, AtenTensorHandle* per_sample_weights, int64_t padding_idx, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_per_sample_weights_backward(AtenTensorHandle grad, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, AtenTensorHandle offset2bag, int64_t mode, int64_t padding_idx, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_empty_memory_format(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_empty_permuted(const int64_t* size, int64_t size_len_, const int64_t* physical_layout, int64_t physical_layout_len_, int32_t* dtype, 
int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_empty(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_empty_strided(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_full(AtenTensorHandle self, const int64_t* size, int64_t size_len_, double fill_value, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_zeros(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_ones(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_empty_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_empty_strided(const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_expand(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t implicit, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_eye(int64_t n, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_eye_m(int64_t n, int64_t m, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_eye_out(AtenTensorHandle out, int64_t n); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_eye_m_out(AtenTensorHandle out, int64_t n, int64_t m); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fill_Scalar(AtenTensorHandle self, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fill_Tensor(AtenTensorHandle self, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fill__Scalar(AtenTensorHandle self, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fill__Tensor(AtenTensorHandle self, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_floor_divide(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_floor_divide__Tensor(AtenTensorHandle self, AtenTensorHandle 
other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_floor_divide_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_floor_divide_Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_floor_divide__Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_full(const int64_t* size, int64_t size_len_, double fill_value, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_full_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, double fill_value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_full_like(AtenTensorHandle self, double fill_value, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_grid_sampler_2d(AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_grid_sampler_2d_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__grid_sampler_2d_cpu_fallback(AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_grid_sampler_3d(AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_grid_sampler_3d_backward(AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hann_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hann_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hamming_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hamming_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hamming_window_periodic_alpha(int64_t window_length, int32_t periodic, double alpha, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_hamming_window_periodic_alpha_beta(int64_t window_length, int32_t periodic, double alpha, double beta, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kaiser_window(int64_t window_length, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kaiser_window_periodic(int64_t window_length, int32_t periodic, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kaiser_window_beta(int64_t window_length, int32_t periodic, double beta, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_group_norm(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_group_norm_backward(AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle rstd, AtenTensorHandle* weight, int64_t N, int64_t C, int64_t HxW, int64_t group, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fft_r2c(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t onesided, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fft_r2c_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t onesided); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fft_c2r(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int64_t last_dim_size, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fft_c2r_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int64_t last_dim_size); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fft_c2c(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t forward, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fft_c2c_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int64_t normalization, int32_t forward); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__validate_compressed_sparse_indices(int32_t is_crow, AtenTensorHandle compressed_idx, AtenTensorHandle plain_idx, int64_t cdim, int64_t dim, int64_t nnz); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__unsafe_index_Tensor(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle source); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_put_(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t 
indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_put(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__unsafe_index_put(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__index_put_impl_(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, int32_t unsafe, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_isin_Tensor_Tensor_out(AtenTensorHandle out, AtenTensorHandle elements, AtenTensorHandle test_elements, int32_t assume_unique, int32_t invert); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_isin_Tensor_Scalar_out(AtenTensorHandle out, AtenTensorHandle elements, double test_element, int32_t assume_unique, int32_t invert); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_isin_Scalar_Tensor_out(AtenTensorHandle out, double element, AtenTensorHandle test_elements, int32_t assume_unique, int32_t invert); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_is_same_size(AtenTensorHandle self, AtenTensorHandle other, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kthvalue(AtenTensorHandle self, int64_t k, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kthvalue_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t k, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_layer_norm(AtenTensorHandle input, const int64_t* normalized_shape, int64_t normalized_shape_len_, AtenTensorHandle* weight, AtenTensorHandle* bias, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_layer_norm_backward(AtenTensorHandle grad_out, AtenTensorHandle input, const int64_t* normalized_shape, int64_t normalized_shape_len_, AtenTensorHandle mean, AtenTensorHandle rstd, AtenTensorHandle* weight, AtenTensorHandle* bias, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linear_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cslt_compress(AtenTensorHandle input, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cslt_sparse_mm(AtenTensorHandle compressed_A, AtenTensorHandle dense_B, AtenTensorHandle* bias, AtenTensorHandle* alpha, int32_t* out_dtype, int32_t transpose_result, int64_t alg_id, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cslt_sparse_mm_search(AtenTensorHandle compressed_A, AtenTensorHandle dense_B, AtenTensorHandle* bias, AtenTensorHandle* alpha, int32_t* out_dtype, int32_t transpose_result, int64_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_semi_structured_linear(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle meta, AtenTensorHandle* bias, const char** activation, int32_t* out_dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda__mixed_dtypes_linear(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle scale, AtenTensorHandle* bias, const char** activation, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linspace(double start, double end, int64_t steps, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linspace_Tensor_Tensor(AtenTensorHandle start, AtenTensorHandle end, int64_t steps, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linspace_Tensor_Scalar(AtenTensorHandle start, double end, int64_t steps, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linspace_Scalar_Tensor(double start, AtenTensorHandle end, int64_t steps, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linspace_out(AtenTensorHandle out, double start, double end, int64_t steps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linspace_Tensor_Tensor_out(AtenTensorHandle out, AtenTensorHandle start, AtenTensorHandle end, int64_t steps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linspace_Tensor_Scalar_out(AtenTensorHandle out, AtenTensorHandle start, double end, int64_t steps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linspace_Scalar_Tensor_out(AtenTensorHandle out, double start, AtenTensorHandle end, int64_t steps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_xlogy__Scalar_Other(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logspace(double start, double end, int64_t steps, double base, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logspace_Tensor_Tensor(AtenTensorHandle start, AtenTensorHandle end, int64_t steps, double base, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logspace_Tensor_Scalar(AtenTensorHandle start, double end, int64_t steps, double base, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logspace_Scalar_Tensor(double start, AtenTensorHandle end, int64_t steps, double base, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logspace_out(AtenTensorHandle out, double start, double end, int64_t steps, double base); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logspace_Tensor_Tensor_out(AtenTensorHandle out, AtenTensorHandle start, AtenTensorHandle end, int64_t steps, double base); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logspace_Tensor_Scalar_out(AtenTensorHandle out, AtenTensorHandle start, double end, int64_t steps, double base); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logspace_Scalar_Tensor_out(AtenTensorHandle out, double start, AtenTensorHandle end, int64_t steps, double base); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_log_softmax_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__log_softmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t half_to_float); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__log_softmax_backward_data_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, int64_t dim, int32_t input_dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__logcumsumexp(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__logcumsumexp_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logcumsumexp(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logcumsumexp_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logsumexp(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_logsumexp_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__aminmax(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__aminmax_dim(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_aminmax_out(AtenTensorHandle min, AtenTensorHandle max, AtenTensorHandle self, int64_t* dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__compute_linear_combination(AtenTensorHandle input, AtenTensorHandle coefficients, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__compute_linear_combination_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle coefficients); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_dim_max(AtenTensorHandle max, AtenTensorHandle max_values, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_amax_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mean(AtenTensorHandle self, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mean_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_median(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_median_dim(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_median_dim_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nanmedian(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nanmedian_dim(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nanmedian_dim_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim, 
int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_min_dim_min(AtenTensorHandle min, AtenTensorHandle min_indices, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_amin_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_convolution(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_batch_norm(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double exponential_average_factor, double epsilon, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_batch_norm_backward(AtenTensorHandle input, AtenTensorHandle grad_output, AtenTensorHandle weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_var, double epsilon, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_convolution(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_convolution_transpose(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_depthwise_convolution(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_convolution_relu(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_convolution_add_relu(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle z, double* alpha, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_rnn(AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle hx, AtenTensorHandle* cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t 
bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__int_mm(AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__int_mm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__convert_weight_to_int4pack(AtenTensorHandle self, int64_t innerKTiles, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__weight_int4pack_mm(AtenTensorHandle self, AtenTensorHandle mat2, int64_t qGroupSize, AtenTensorHandle qScaleAndZeros, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mode(AtenTensorHandle self, int64_t dim, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mode_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mv(AtenTensorHandle self, AtenTensorHandle vec, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mv_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle vec); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_narrow_copy(AtenTensorHandle self, int64_t dim, int64_t start, int64_t length, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_batch_norm(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_batch_norm_out(AtenTensorHandle out, AtenTensorHandle save_mean, AtenTensorHandle save_invstd, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double momentum, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_batch_norm_legit(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, int32_t training, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_batch_norm_legit_no_training(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_batch_norm_legit_out(AtenTensorHandle out, AtenTensorHandle save_mean, AtenTensorHandle save_invstd, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, int32_t training, double momentum, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_batch_norm_legit_no_stats(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, int32_t training, double momentum, double eps, 
AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_batch_norm_legit_no_stats_out(AtenTensorHandle out, AtenTensorHandle save_mean, AtenTensorHandle save_invstd, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, int32_t training, double momentum, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_stats(AtenTensorHandle input, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_elemt(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle mean, AtenTensorHandle invstd, double eps, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_elemt_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle mean, AtenTensorHandle invstd, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_gather_stats(AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum, double eps, int64_t count, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_gather_stats_with_counts(AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum, double eps, AtenTensorHandle counts, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_batch_norm_backward(AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_invstd, int32_t train, double eps, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_backward_reduce(AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* weight, int32_t input_g, int32_t weight_g, int32_t bias_g, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_backward_elemt(AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* weight, AtenTensorHandle sum_dy, AtenTensorHandle sum_dy_xmu, AtenTensorHandle count, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_update_stats(AtenTensorHandle input, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nnpack_spatial_convolution(AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ones(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ones_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_ones_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__euclidean_dist(AtenTensorHandle x1, AtenTensorHandle x2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cdist_forward(AtenTensorHandle x1, AtenTensorHandle x2, double p, int64_t* compute_mode, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cdist_backward(AtenTensorHandle grad, AtenTensorHandle x1, AtenTensorHandle x2, double p, AtenTensorHandle cdist, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pdist_forward(AtenTensorHandle self, double p, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pdist_backward(AtenTensorHandle grad, AtenTensorHandle self, double p, AtenTensorHandle pdist, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_permute(AtenTensorHandle self, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_pixel_shuffle(AtenTensorHandle self, int64_t upscale_factor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_pixel_unshuffle(AtenTensorHandle self, int64_t downscale_factor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_channel_shuffle(AtenTensorHandle self, int64_t groups, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_is_pinned(AtenTensorHandle self, int32_t* device, int32_t device_index_, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pin_memory(AtenTensorHandle self, int32_t* device, int32_t device_index_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rad2deg(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rad2deg_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rad2deg_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scalar_tensor(double s, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rand(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rand_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rand_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint(int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_low(int64_t low, int64_t high, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_out(AtenTensorHandle out, int64_t high, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_randint_low_out(AtenTensorHandle out, int64_t low, int64_t high, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_like(AtenTensorHandle self, int64_t high, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_like_low_dtype(AtenTensorHandle self, int64_t low, int64_t high, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randn(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randn_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randperm(int64_t n, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randperm_out(AtenTensorHandle out, int64_t n); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_range_step(double start, double end, double step, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_range(double start, double end, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_range_out_(AtenTensorHandle out, double start, double end); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_range_out(AtenTensorHandle out, double start, double end, double step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_repeat(AtenTensorHandle self, const int64_t* repeats, int64_t repeats_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_repeat_interleave_Tensor(AtenTensorHandle repeats, int64_t* output_size, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__reshape_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__reshape_alias(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__prelu_kernel(AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__prelu_kernel_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_gelu_out(AtenTensorHandle out, AtenTensorHandle self, const char* approximate); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_gelu_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const char* approximate); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardshrink_out(AtenTensorHandle out, AtenTensorHandle self, double lambd); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardshrink_backward_grad_input(AtenTensorHandle grad_input, 
AtenTensorHandle grad_out, AtenTensorHandle self, double lambd); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_select_int(AtenTensorHandle self, int64_t dim, int64_t index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_select_backward(AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_celu(AtenTensorHandle self, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_celu_(AtenTensorHandle self, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mish_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mish_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_detach(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_detach_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slice_Tensor(AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slice_backward(AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t start, int64_t end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slice_inverse(AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slice_scatter(AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_select_scatter(AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diagonal_scatter(AtenTensorHandle self, AtenTensorHandle src, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_as_strided_scatter(AtenTensorHandle self, AtenTensorHandle src, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_softmax_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__softmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t half_to_float); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__softmax_backward_data_out(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle output, int64_t dim, int32_t input_dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_dim(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_dims(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze__dim(AtenTensorHandle self, int64_t dim, AtenTensorHandle* 
ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze__dims(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sspaddmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__chunk_cat(const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, int64_t num_chunks, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__chunk_cat_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, int64_t num_chunks); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_stack(const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_stack_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__stack(const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__stack_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sum(AtenTensorHandle self, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sum_IntList_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nansum(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nansum_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_std_correction(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_std_mean_correction(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_std_correction_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_prod(AtenTensorHandle self, int32_t* dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_prod_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_t(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_t_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_threshold_out(AtenTensorHandle out, AtenTensorHandle self, double threshold, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_threshold_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double threshold); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_transpose_int(AtenTensorHandle self, int64_t dim0, int64_t dim1, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_transpose_(AtenTensorHandle self, int64_t dim0, int64_t dim1, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_flip(AtenTensorHandle self, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_roll(AtenTensorHandle self, const int64_t* shifts, int64_t shifts_len_, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rot90(AtenTensorHandle self, int64_t k, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__transform_bias_rescale_qkv(AtenTensorHandle qkv, AtenTensorHandle qkv_bias, int64_t num_heads, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_tensor_from_mask(AtenTensorHandle t, AtenTensorHandle mask, int32_t mask_check, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_tensor_from_mask_left_aligned(AtenTensorHandle t, AtenTensorHandle mask, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_from_padded(AtenTensorHandle padded, AtenTensorHandle cpu_nested_shape_example, int32_t fuse_transform_0213, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_view_from_buffer(AtenTensorHandle self, AtenTensorHandle nested_size, AtenTensorHandle nested_strides, AtenTensorHandle offsets, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_view_from_buffer_copy(AtenTensorHandle self, AtenTensorHandle nested_size, AtenTensorHandle nested_strides, AtenTensorHandle offsets, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_view_from_jagged_copy(AtenTensorHandle self, AtenTensorHandle offsets, AtenTensorHandle dummy, AtenTensorHandle* lengths, int64_t ragged_idx, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_get_values_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__trilinear(AtenTensorHandle i1, AtenTensorHandle i2, AtenTensorHandle i3, const int64_t* expand1, int64_t expand1_len_, const int64_t* expand2, int64_t expand2_len_, const int64_t* expand3, int64_t expand3_len_, const int64_t* sumdim, int64_t sumdim_len_, int64_t unroll_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__unique(AtenTensorHandle self, int32_t sorted, int32_t return_inverse, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unique_dim(AtenTensorHandle self, int64_t dim, int32_t sorted, int32_t return_inverse, int32_t return_counts, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unique_consecutive(AtenTensorHandle self, int32_t return_inverse, int32_t return_counts, int64_t* dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unique_dim_consecutive(AtenTensorHandle self, int64_t dim, int32_t return_inverse, int32_t return_counts, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__unique2(AtenTensorHandle self, int32_t sorted, int32_t return_inverse, int32_t return_counts, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda__unsafe_view(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unsqueeze(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unsqueeze_(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_var_correction(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_var_correction_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_var_mean_correction(AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_where_self_out(AtenTensorHandle out, AtenTensorHandle condition, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__weight_norm_interface(AtenTensorHandle v, AtenTensorHandle g, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__weight_norm_interface_backward(AtenTensorHandle grad_w, AtenTensorHandle saved_v, AtenTensorHandle saved_g, AtenTensorHandle saved_norms, int64_t dim, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__efficientzerotensor(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_zeros(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_zeros_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_zeros_like(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__standard_gamma_grad(AtenTensorHandle self, AtenTensorHandle output, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__dirichlet_grad(AtenTensorHandle x, AtenTensorHandle alpha, AtenTensorHandle total, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_sum_dim(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_norm_ScalarOpt_dtype(AtenTensorHandle self, double* p, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_norm_Scalar(AtenTensorHandle self, double p, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_norm_dtype_out(AtenTensorHandle out, AtenTensorHandle self, double* p, const int64_t* dim, int64_t dim_len_, int32_t keepdim, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_norm_out(AtenTensorHandle out, AtenTensorHandle self, double* p, const int64_t* dim, int64_t dim_len_, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_as_(AtenTensorHandle self, AtenTensorHandle the_template, int32_t* 
memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_zero_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rsub_Tensor(AtenTensorHandle self, AtenTensorHandle other, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_addmm(AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__addmm_activation_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha, int32_t use_gelu); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_mm(AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle* bias, int32_t* out_dtype, AtenTensorHandle* scale_a, AtenTensorHandle* scale_b, AtenTensorHandle* scale_result, int32_t use_fast_accum, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_mm_out(AtenTensorHandle out, AtenTensorHandle out_amax, AtenTensorHandle self, AtenTensorHandle mat2, AtenTensorHandle* bias, int32_t* out_dtype, AtenTensorHandle* scale_a, AtenTensorHandle* scale_b, AtenTensorHandle* scale_result, int32_t use_fast_accum); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_compressed_tensor_comp_plain_value_size(AtenTensorHandle compressed_indices, AtenTensorHandle plain_indices, AtenTensorHandle values, const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_compressed_tensor_comp_plain_value(AtenTensorHandle compressed_indices, AtenTensorHandle plain_indices, AtenTensorHandle values, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_coo_tensor_size(const int64_t* size, int64_t size_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_dim(AtenTensorHandle self, int64_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_dense_dim(AtenTensorHandle self, int64_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_is_coalesced(AtenTensorHandle self, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_values(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_crow_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_col_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ccol_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_row_indices(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_sparse_dim(AtenTensorHandle self, int64_t sparse_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse(AtenTensorHandle self, 
int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_csr(AtenTensorHandle self, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_csc(AtenTensorHandle self, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_bsr(AtenTensorHandle self, const int64_t* blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_bsc(AtenTensorHandle self, const int64_t* blocksize, int64_t blocksize_len_, int64_t* dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_semi_structured(AtenTensorHandle dense, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_tensor_dynamic(AtenTensorHandle self, int32_t dtype, int32_t reduce_range, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_tensor(AtenTensorHandle self, double scale, int64_t zero_point, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_tensor_tensor_qparams(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_channel(AtenTensorHandle self, AtenTensorHandle scales, AtenTensorHandle zero_points, int64_t axis, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_dequantize_self(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__make_per_tensor_quantized_tensor(AtenTensorHandle self, double scale, int64_t zero_point, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__make_per_channel_quantized_tensor(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fake_quantize_per_tensor_affine_cachemask(AtenTensorHandle self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, AtenTensorHandle fake_quant_enabled, int64_t quant_min, int64_t quant_max, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fake_quantize_learnable_per_tensor_affine(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fake_quantize_learnable_per_tensor_affine_backward(AtenTensorHandle grad, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fake_quantize_per_channel_affine_cachemask(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda__fake_quantize_learnable_per_channel_affine(AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fake_quantize_learnable_per_channel_affine_backward(AtenTensorHandle grad, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_moving_avg_obs_fq_helper(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_copy(AtenTensorHandle self, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, int32_t non_blocking, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__local_scalar_dense(AtenTensorHandle self, double* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_lstm_cell(AtenTensorHandle input_gates, AtenTensorHandle hidden_gates, AtenTensorHandle cx, AtenTensorHandle* input_bias, AtenTensorHandle* hidden_bias, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_lstm_cell_backward_impl(AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, AtenTensorHandle cx, AtenTensorHandle cy, AtenTensorHandle workspace, int32_t has_bias, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_gru_cell(AtenTensorHandle input_gates, AtenTensorHandle hidden_gates, AtenTensorHandle hx, AtenTensorHandle* input_bias, AtenTensorHandle* hidden_bias, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_gru_cell_backward(AtenTensorHandle grad_hy, AtenTensorHandle workspace, int32_t has_bias, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pack_padded_sequence(AtenTensorHandle input, AtenTensorHandle lengths, int32_t batch_first, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_set__source_Tensor(AtenTensorHandle self, AtenTensorHandle source, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_set_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_lift(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_lift_fresh(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_lift_fresh_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_is_set_to(AtenTensorHandle self, AtenTensorHandle tensor, int32_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_fill__Scalar(AtenTensorHandle self, AtenTensorHandle mask, double value, 
AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_fill__Tensor(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_fill_Tensor(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_scatter_(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_scatter(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_scatter_backward(AtenTensorHandle grad_output, AtenTensorHandle mask, const int64_t* sizes, int64_t sizes_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__masked_softmax(AtenTensorHandle self, AtenTensorHandle mask, int64_t* dim, int64_t* mask_type, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__masked_softmax_backward(AtenTensorHandle grad_output, AtenTensorHandle output, AtenTensorHandle mask, int64_t* dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_dtype(AtenTensorHandle self, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_put_(AtenTensorHandle self, AtenTensorHandle index, AtenTensorHandle source, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_put(AtenTensorHandle self, AtenTensorHandle index, AtenTensorHandle source, int32_t accumulate, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_add_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle source, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_reduce_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle source, const char* reduce, int32_t include_self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_fill__int_Scalar(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_fill_int_Scalar(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_fill__int_Tensor(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_fill_int_Tensor(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle value, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scatter_src_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scatter_value_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scatter_reduce_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src, const char* reduce); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_scatter_value_reduce_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value, const char* reduce); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scatter_add_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scatter_reduce_two_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle src, const char* reduce, int32_t include_self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda___irshift___Scalar(AtenTensorHandle self, double other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda___irshift___Tensor(AtenTensorHandle self, AtenTensorHandle other, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addbmm_(AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addbmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_addbmm(AtenTensorHandle self, AtenTensorHandle batch1, AtenTensorHandle batch2, double beta, double alpha, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_triu_out(AtenTensorHandle out, AtenTensorHandle self, int64_t diagonal); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_tril_out(AtenTensorHandle out, AtenTensorHandle self, int64_t diagonal); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_tril_indices(int64_t row, int64_t col, int64_t offset, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_triu_indices(int64_t row, int64_t col, int64_t offset, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_trace(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_take_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_take(AtenTensorHandle self, AtenTensorHandle index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_select_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_select(AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_select_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_select(AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nonzero_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nonzero(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_gather_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, int32_t sparse_grad); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_triangular_solve_X(AtenTensorHandle X, AtenTensorHandle M, AtenTensorHandle self, AtenTensorHandle A, int32_t upper, int32_t 
transpose, int32_t unitriangular); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__linalg_check_errors(AtenTensorHandle info, const char* api_name, int32_t is_matrix); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_solve_triangular_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle B, int32_t upper, int32_t left, int32_t unitriangular); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_solve_triangular(AtenTensorHandle self, AtenTensorHandle B, int32_t upper, int32_t left, int32_t unitriangular, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cholesky_out(AtenTensorHandle out, AtenTensorHandle self, int32_t upper); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cholesky(AtenTensorHandle self, int32_t upper, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cholesky_solve_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle input2, int32_t upper); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cholesky_solve(AtenTensorHandle self, AtenTensorHandle input2, int32_t upper, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cholesky_solve_helper(AtenTensorHandle self, AtenTensorHandle A, int32_t upper, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cholesky_inverse(AtenTensorHandle self, int32_t upper, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cholesky_inverse_out(AtenTensorHandle out, AtenTensorHandle self, int32_t upper); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_geqrf_a(AtenTensorHandle a, AtenTensorHandle tau, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_geqrf(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ormqr_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle input2, AtenTensorHandle input3, int32_t left, int32_t transpose); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ormqr(AtenTensorHandle self, AtenTensorHandle input2, AtenTensorHandle input3, int32_t left, int32_t transpose, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_lu_unpack_out(AtenTensorHandle P, AtenTensorHandle L, AtenTensorHandle U, AtenTensorHandle LU_data, AtenTensorHandle LU_pivots, int32_t unpack_data, int32_t unpack_pivots); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_dist(AtenTensorHandle self, AtenTensorHandle other, double p, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_histc_out(AtenTensorHandle out, AtenTensorHandle self, int64_t bins, double min, double max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_histc(AtenTensorHandle self, int64_t bins, double min, double max, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_min(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_min_unary_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_unary_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sort_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t dim, int32_t descending); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sort_values_stable(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int32_t* 
stable, int64_t dim, int32_t descending); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sort(AtenTensorHandle self, int64_t dim, int32_t descending, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_argsort_stable(AtenTensorHandle self, int32_t stable, int64_t dim, int32_t descending, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_topk_values(AtenTensorHandle values, AtenTensorHandle indices, AtenTensorHandle self, int64_t k, int64_t dim, int32_t largest, int32_t sorted); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_all_all_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_any_all_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_renorm_out(AtenTensorHandle out, AtenTensorHandle self, double p, int64_t dim, double maxnorm); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unfold(AtenTensorHandle self, int64_t dimension, int64_t size, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unfold_backward(AtenTensorHandle grad_in, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t size, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_alias(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__amp_foreach_non_finite_check_and_unscale_(const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle found_inf, AtenTensorHandle inv_scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__amp_update_scale_(AtenTensorHandle self, AtenTensorHandle growth_tracker, AtenTensorHandle found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_add__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_add__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_add__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_add__Tensor(const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sub__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sub__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sub__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_mul__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_mul__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_mul__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_mul__Tensor(const 
AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_div__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_div__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_div__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_div__Tensor(const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_max__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_max__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_max__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_min__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_min__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_min__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_maximum__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_maximum__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_maximum__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_minimum__Scalar(const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_minimum__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_minimum__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcdiv__Scalar(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcdiv__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcdiv__Tensor(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, AtenTensorHandle scalars); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcmul__Scalar(const AtenTensorHandle* self, 
int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcmul__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcmul__Tensor(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, AtenTensorHandle scalars); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_abs_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_acos_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_asin_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_atan_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_ceil_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_cos_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_cosh_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_erf_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_erfc_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_exp_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_expm1_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_floor_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_frac_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_lerp__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensors1, int64_t tensors1_len_, const AtenTensorHandle* weights, int64_t weights_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_lerp__Scalar(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensors1, int64_t tensors1_len_, double weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_lgamma_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_log_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_log10_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_log1p_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_log2_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_neg_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_pow__List(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* exponent, int64_t exponent_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_pow__Scalar(const 
AtenTensorHandle* self, int64_t self_len_, double exponent); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_pow__ScalarList(const AtenTensorHandle* self, int64_t self_len_, const double* exponent, int64_t exponent_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_reciprocal_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_round_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sigmoid_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sign_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sin_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sinh_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sqrt_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_tan_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_tanh_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_trunc_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_zero_(const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_copy_(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* src, int64_t src_len_, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bucketize_Tensor(AtenTensorHandle self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bucketize_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bucketize_Scalar(double self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_searchsorted_Tensor(AtenTensorHandle sorted_sequence, AtenTensorHandle self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_searchsorted_Tensor_out(AtenTensorHandle out, AtenTensorHandle sorted_sequence, AtenTensorHandle self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_searchsorted_Scalar(AtenTensorHandle sorted_sequence, double self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_searchsorted_Scalar_out(AtenTensorHandle out, AtenTensorHandle sorted_sequence, double self, int32_t out_int32, int32_t right, const char** side, AtenTensorHandle* sorter); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__convert_indices_from_coo_to_csr_out(AtenTensorHandle out, AtenTensorHandle self, int64_t size, int32_t out_int32); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__convert_indices_from_csr_to_coo_out(AtenTensorHandle out, AtenTensorHandle crow_indices, AtenTensorHandle col_indices, int32_t out_int32, int32_t transpose); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_mse_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mse_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mse_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_multi_margin_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, double p, double margin, AtenTensorHandle* weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_multi_margin_loss(AtenTensorHandle self, AtenTensorHandle target, double p, double margin, AtenTensorHandle* weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_multi_margin_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, double p, double margin, AtenTensorHandle* weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_multi_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, double p, double margin, AtenTensorHandle* weight, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_multilabel_margin_loss_forward_output(AtenTensorHandle output, AtenTensorHandle is_target, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_multilabel_margin_loss_forward(AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_multilabel_margin_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle is_target); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_multilabel_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle is_target, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nll_loss_forward_output(AtenTensorHandle output, AtenTensorHandle total_weight, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nll_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index, AtenTensorHandle total_weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nll_loss2d_forward_output(AtenTensorHandle output, AtenTensorHandle total_weight, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nll_loss2d_forward(AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nll_loss2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, 
AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index, AtenTensorHandle total_weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nll_loss2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, int64_t reduction, int64_t ignore_index, AtenTensorHandle total_weight, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_smooth_l1_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double beta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_smooth_l1_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double beta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_smooth_l1_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double beta, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_huber_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double delta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_huber_loss(AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double delta, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_huber_loss_backward_out(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double delta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_huber_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, double delta, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_soft_margin_loss_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_soft_margin_loss(AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_soft_margin_loss_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_soft_margin_loss_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle target, int64_t reduction, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_elu_out(AtenTensorHandle out, AtenTensorHandle self, double alpha, double scale, double input_scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_elu_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, double alpha, double scale, double input_scale, int32_t is_result, AtenTensorHandle self_or_result); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_glu_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_glu_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_glu_backward(AtenTensorHandle grad_output, AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_glu_jvp(AtenTensorHandle glu, AtenTensorHandle x, AtenTensorHandle dx, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_glu_backward_jvp(AtenTensorHandle grad_x, 
AtenTensorHandle grad_glu, AtenTensorHandle x, AtenTensorHandle dgrad_glu, AtenTensorHandle dx, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardsigmoid_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardsigmoid_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardtanh_out(AtenTensorHandle out, AtenTensorHandle self, double min_val, double max_val); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardtanh(AtenTensorHandle self, double min_val, double max_val, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardtanh_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double min_val, double max_val); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardtanh_backward(AtenTensorHandle grad_output, AtenTensorHandle self, double min_val, double max_val, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardtanh_(AtenTensorHandle self, double min_val, double max_val, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardswish_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardswish(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardswish_(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardswish_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_leaky_relu_out(AtenTensorHandle out, AtenTensorHandle self, double negative_slope); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_leaky_relu_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double negative_slope, int32_t self_is_result); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_log_sigmoid_forward_output(AtenTensorHandle output, AtenTensorHandle buffer, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_log_sigmoid_forward(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_log_sigmoid_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle buffer); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_log_sigmoid_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle buffer, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rrelu_with_noise_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle noise, double lower, double upper, int32_t training, int32_t self_is_result, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_softplus_out(AtenTensorHandle out, AtenTensorHandle self, double beta, double threshold); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_softplus_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double beta, double threshold); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_softshrink_out(AtenTensorHandle out, AtenTensorHandle self, double lambd); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_softshrink_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, double 
lambd); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_avg_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool2d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_avg_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool3d(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_avg_pool3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_max_pool2d_out(AtenTensorHandle out, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_max_pool2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_max_pool3d_out(AtenTensorHandle out, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_adaptive_max_pool3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_avg_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_avg_pool2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_avg_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_avg_pool3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, int32_t ceil_mode, int32_t count_include_pad, int64_t* divisor_override); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool2d_output(AtenTensorHandle output, AtenTensorHandle indices, AtenTensorHandle 
self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle random_samples); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool3d_output(AtenTensorHandle output, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle random_samples); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fractional_max_pool3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle indices, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool2d_with_indices_out(AtenTensorHandle out, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool2d_with_indices_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool3d_with_indices_out(AtenTensorHandle out, AtenTensorHandle indices, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool3d_with_indices(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool3d_with_indices_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool3d_with_indices_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const 
int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode, AtenTensorHandle indices, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_unpool2d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_unpool2d(AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_unpool3d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_unpool3d(AtenTensorHandle self, AtenTensorHandle indices, const int64_t* output_size, int64_t output_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reflection_pad1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reflection_pad1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reflection_pad2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reflection_pad2d(AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reflection_pad2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reflection_pad2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reflection_pad3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_reflection_pad3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad2d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_replication_pad3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_replication_pad3d_backward(AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_linear1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_linear1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_bilinear2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_bilinear2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_bilinear2d_aa_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_bilinear2d_aa_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_bicubic2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_bicubic2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_bicubic2d_aa_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_bicubic2d_aa_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_trilinear3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, int32_t align_corners, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_upsample_trilinear3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, int32_t align_corners, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_nearest1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_nearest_exact1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_nearest1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_nearest_exact1d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_nearest2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_nearest_exact2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_nearest2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_nearest_exact2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_nearest3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_nearest_exact3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_upsample_nearest3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__upsample_nearest_exact3d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_output, const int64_t* output_size, int64_t output_size_len_, const int64_t* input_size, int64_t input_size_len_, double* scales_d, double* scales_h, double* scales_w); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slow_conv_transpose2d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* 
bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slow_conv_transpose3d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slow_conv_transpose3d(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* dilation, int64_t dilation_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__slow_conv2d_forward_output(AtenTensorHandle output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__slow_conv2d_forward(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__slow_conv2d_backward_grad_input(AtenTensorHandle grad_input, AtenTensorHandle grad_weight, AtenTensorHandle grad_bias, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__slow_conv2d_backward_output_mask(AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int32_t* output_mask, int64_t output_mask_len_, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__conv_depthwise2d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__conv_depthwise2d(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_conv_depthwise3d(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, 
AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slow_conv_dilated2d(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slow_conv_dilated3d(AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_col2im_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* dilation, int64_t dilation_len_, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_col2im(AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* dilation, int64_t dilation_len_, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_im2col_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* dilation, int64_t dilation_len_, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_im2col(AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* dilation, int64_t dilation_len_, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fft_fftfreq(int64_t n, double d, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fft_fftfreq_out(AtenTensorHandle out, int64_t n, double d); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fft_rfftfreq(int64_t n, double d, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fft_rfftfreq_out(AtenTensorHandle out, int64_t n, double d); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_cholesky_ex_L(AtenTensorHandle L, AtenTensorHandle info, AtenTensorHandle self, int32_t upper, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_cross_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_lu_factor_ex_out(AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle info, AtenTensorHandle A, int32_t pivot, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_lu_out(AtenTensorHandle P, AtenTensorHandle L, AtenTensorHandle U, AtenTensorHandle A, int32_t pivot); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_lu_solve_out(AtenTensorHandle out, AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle B, int32_t left, int32_t adjoint); 
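Aside, not part of the diff above: every shim entry point declared here follows the same C calling convention (opaque AtenTensorHandle tensors, nullable pointers for optional arguments, an AOTITorchError status return), so a minimal sketch may help. It assumes the generated shim header is already included, that a status of 0 means success, and that the caller has created a correctly shaped and typed `out` tensor handle by other means; `fill_fftfreq` is a hypothetical helper name, not part of the API.

/* Minimal sketch: calling one of the out-variant shim functions declared above. */
static int fill_fftfreq(AtenTensorHandle out, int64_t n, double d) {
  /* The shim writes its result into the caller-provided tensor handle. */
  AOTITorchError err = aoti_torch_cuda_fft_fftfreq_out(out, n, d);
  return err == 0 ? 0 : -1; /* assumption: a zero status denotes success */
}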
+AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__linalg_det_result(AtenTensorHandle result, AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle A); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_ldl_factor_ex_out(AtenTensorHandle LD, AtenTensorHandle pivots, AtenTensorHandle info, AtenTensorHandle self, int32_t hermitian, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_ldl_solve_out(AtenTensorHandle out, AtenTensorHandle LD, AtenTensorHandle pivots, AtenTensorHandle B, int32_t hermitian); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_lstsq(AtenTensorHandle self, AtenTensorHandle b, double* rcond, const char** driver, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_lstsq_out(AtenTensorHandle solution, AtenTensorHandle residuals, AtenTensorHandle rank, AtenTensorHandle singular_values, AtenTensorHandle self, AtenTensorHandle b, double* rcond, const char** driver); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_matrix_exp(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__linalg_slogdet_sign(AtenTensorHandle sign, AtenTensorHandle logabsdet, AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle A); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_eig(AtenTensorHandle self, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_eig_out(AtenTensorHandle eigenvalues, AtenTensorHandle eigenvectors, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__linalg_eigvals(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_eigvals_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__linalg_eigh_eigenvalues(AtenTensorHandle eigenvalues, AtenTensorHandle eigenvectors, AtenTensorHandle A, const char* UPLO, int32_t compute_v); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_householder_product(AtenTensorHandle input, AtenTensorHandle tau, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_householder_product_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle tau); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_inv_ex_inverse(AtenTensorHandle inverse, AtenTensorHandle info, AtenTensorHandle A, int32_t check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_vector_norm_out(AtenTensorHandle out, AtenTensorHandle self, double ord, const int64_t** dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__linalg_svd_U(AtenTensorHandle U, AtenTensorHandle S, AtenTensorHandle Vh, AtenTensorHandle A, int32_t full_matrices, int32_t compute_uv, const char** driver); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_pinv_atol_rtol_tensor(AtenTensorHandle self, AtenTensorHandle* atol, AtenTensorHandle* rtol, int32_t hermitian, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_pinv_atol_rtol_tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle* atol, AtenTensorHandle* rtol, int32_t hermitian); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__linalg_solve_ex_result(AtenTensorHandle result, AtenTensorHandle LU, AtenTensorHandle pivots, AtenTensorHandle info, AtenTensorHandle A, AtenTensorHandle B, int32_t left, int32_t 
check_errors); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_qr_out(AtenTensorHandle Q, AtenTensorHandle R, AtenTensorHandle A, const char* mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_parallel_materialize(AtenTensorHandle self, int64_t num_parallel, int32_t skip_first, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_warn_in_autograd(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_autograd_multiple_dispatch_fullcoverage(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_autograd_multiple_dispatch_view(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_autograd_multiple_dispatch_view_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_segment_reduce(AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* indices, AtenTensorHandle* offsets, int64_t axis, int32_t unsafe, double* initial, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__segment_reduce_backward(AtenTensorHandle grad, AtenTensorHandle output, AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* offsets, int64_t axis, double* initial, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_tensor_from_tensor_list(const AtenTensorHandle* list, int64_t list_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fw_primal_copy(AtenTensorHandle self, int64_t level, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__make_dual_copy(AtenTensorHandle primal, AtenTensorHandle tangent, int64_t level, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_as_real_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_as_complex_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__conj_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__neg_view_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_as_strided_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_broadcast_to_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diagonal_copy(AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_expand_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t implicit, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_permute_copy(AtenTensorHandle self, const int64_t* dims, int64_t dims_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__reshape_alias_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_select_copy_int(AtenTensorHandle self, 
int64_t dim, int64_t index, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_detach_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slice_copy_Tensor(AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_copy_dim(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_copy_dims(AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_t_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_transpose_copy_int(AtenTensorHandle self, int64_t dim0, int64_t dim1, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unsqueeze_copy(AtenTensorHandle self, int64_t dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__values_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_values_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_crow_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_col_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ccol_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_row_indices_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unbind_copy_int_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_split_copy_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, int64_t split_size, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_split_with_sizes_copy_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, const int64_t* split_sizes, int64_t split_sizes_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_copy(AtenTensorHandle self, const int64_t* size, int64_t size_len_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_copy_dtype(AtenTensorHandle self, int32_t dtype, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unfold_copy(AtenTensorHandle self, int64_t dimension, int64_t size, int64_t step, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_alias_copy(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__transformer_encoder_layer_fwd(AtenTensorHandle src, int64_t embed_dim, int64_t num_heads, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, int32_t use_gelu, int32_t norm_first, double eps, AtenTensorHandle norm_weight_1, AtenTensorHandle norm_bias_1, AtenTensorHandle norm_weight_2, AtenTensorHandle norm_bias_2, AtenTensorHandle 
ffn_weight_1, AtenTensorHandle ffn_bias_1, AtenTensorHandle ffn_weight_2, AtenTensorHandle ffn_bias_2, AtenTensorHandle* mask, int64_t* mask_type, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_multi_head_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, int64_t embed_dim, int64_t num_head, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, AtenTensorHandle* mask, int32_t need_weights, int32_t average_attn_weights, int64_t* mask_type, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_sdp_choice(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* attn_mask, double dropout_p, int32_t is_causal, double* scale, int64_t* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_flash_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, int64_t* ret4, int64_t* ret5, AtenTensorHandle* ret6, AtenTensorHandle* ret7, AtenTensorHandle* ret8); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_flash_attention_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle cum_seq_q, AtenTensorHandle cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_efficient_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* attn_bias, int32_t compute_log_sumexp, double dropout_p, int32_t is_causal, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_efficient_attention_backward(AtenTensorHandle grad_out_, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle attn_bias, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, double dropout_p, const int32_t* grad_input_mask, int64_t grad_input_mask_len_, int32_t is_causal, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__scaled_dot_product_cudnn_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__flash_attention_forward(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* cum_seq_q, AtenTensorHandle* cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, int32_t return_debug_mask, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda__flash_attention_backward(AtenTensorHandle grad_out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle out, AtenTensorHandle logsumexp, AtenTensorHandle cum_seq_q, AtenTensorHandle cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, int32_t is_causal, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, double* scale, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__efficient_attention_forward(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* bias, AtenTensorHandle* cu_seqlens_q, AtenTensorHandle* cu_seqlens_k, int64_t* max_seqlen_q, int64_t* max_seqlen_k, double dropout_p, int64_t custom_mask_type, int32_t compute_log_sumexp, double* scale, AtenTensorHandle* causal_diagonal, AtenTensorHandle* seqlen_k, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, int64_t* ret4, int64_t* ret5); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__efficient_attention_backward(AtenTensorHandle grad_out_, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, AtenTensorHandle* bias, AtenTensorHandle out, AtenTensorHandle* cu_seqlens_q, AtenTensorHandle* cu_seqlens_k, int64_t max_seqlen_q, int64_t max_seqlen_k, AtenTensorHandle logsumexp, double dropout_p, AtenTensorHandle philox_seed, AtenTensorHandle philox_offset, int64_t custom_mask_type, int32_t bias_requires_grad, double* scale, int64_t* num_splits_key, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__triton_scaled_dot_attention(AtenTensorHandle q, AtenTensorHandle k, AtenTensorHandle v, double dropout_p, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fill_mem_eff_dropout_mask_(AtenTensorHandle self, double dropout_p, int64_t seed, int64_t offset, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__triton_multi_head_attention(AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, int64_t embed_dim, int64_t num_head, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, AtenTensorHandle* mask, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_adam_(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, double lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_adam__tensor_lr(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, AtenTensorHandle lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); 
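Aside, not part of the diff above: list-typed arguments in these declarations (for example the parameter, gradient, and exp_avg lists of the fused-optimizer entry points) are passed as a pointer plus an explicit `_len_` count, optional tensors as nullable AtenTensorHandle*, and booleans as int32_t. Below is a hedged sketch only, assuming the shim header and <stddef.h> are included and that a zero AOTITorchError means success; `run_fused_adam_step` and its hard-coded hyperparameters are illustrative, not part of the generated API.

/* Sketch: driving the fused Adam shim over n parameter tensors. */
static AOTITorchError run_fused_adam_step(
    AtenTensorHandle* params, AtenTensorHandle* grads,
    AtenTensorHandle* exp_avgs, AtenTensorHandle* exp_avg_sqs,
    AtenTensorHandle* max_exp_avg_sqs, AtenTensorHandle* state_steps,
    int64_t n, double lr) {
  /* amsgrad=0, maximize=0; grad_scale / found_inf omitted (NULL == absent). */
  return aoti_torch_cuda__fused_adam_(
      params, n, grads, n, exp_avgs, n, exp_avg_sqs, n,
      max_exp_avg_sqs, n, state_steps, n,
      lr, /*beta1=*/0.9, /*beta2=*/0.999, /*weight_decay=*/0.0,
      /*eps=*/1e-8, /*amsgrad=*/0, /*maximize=*/0,
      /*grad_scale=*/NULL, /*found_inf=*/NULL);
}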
+AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_adamw_(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, double lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_adamw__tensor_lr(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, AtenTensorHandle lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_sgd_(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* momentum_buffer_list, int64_t momentum_buffer_list_len_, double weight_decay, double momentum, double lr, double dampening, int32_t nesterov, int32_t maximize, int32_t is_first_step, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_sgd__tensor_lr(const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* momentum_buffer_list, int64_t momentum_buffer_list_len_, double weight_decay, double momentum, AtenTensorHandle lr, double dampening, int32_t nesterov, int32_t maximize, int32_t is_first_step, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__new_zeros_with_same_feature_meta_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, int64_t self_num_batch_dims); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_ctc_loss_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, int64_t blank, int32_t deterministic, int32_t zero_infinity); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_rnn_flatten_weight_out(AtenTensorHandle out, const AtenTensorHandle* weight_arr, int64_t weight_arr_len_, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, int32_t bidirectional); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_rnn_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle* weight_buf, AtenTensorHandle hx, AtenTensorHandle* cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cuda__cudnn_rnn_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, const AtenTensorHandle* out3, int64_t out3_len_, AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle weight_buf, AtenTensorHandle hx, AtenTensorHandle* cx, AtenTensorHandle output, AtenTensorHandle* grad_output, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state, AtenTensorHandle reserve, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cudnn_init_dropout_state_out(AtenTensorHandle out, double dropout, int32_t train, int64_t dropout_seed); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__masked_scale_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, double scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_dropout_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, double p, int32_t* train); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_dropout_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle mask, double scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__conj_physical_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__add_relu_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_add_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_affine_grid_generator_out(AtenTensorHandle out, AtenTensorHandle theta, const int64_t* size, int64_t size_len_, int32_t align_corners); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_functorch_fallback_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bartlett_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bartlett_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantized_batch_norm_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle mean, AtenTensorHandle var, double eps, double output_scale, int64_t output_zero_point); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_binary_cross_entropy_with_logits_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle target, AtenTensorHandle* weight, AtenTensorHandle* pos_weight, int64_t reduction); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bincount_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle* weights, int64_t minlength); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_blackman_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_blackman_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_block_diag_out(AtenTensorHandle out, const AtenTensorHandle* tensors, int64_t tensors_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_constant_pad_nd_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* pad, int64_t pad_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t** bias_sizes, int64_t bias_sizes_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution_overrideable_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_convolution_backward_overrideable_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__convolution_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t transposed, const int64_t* output_padding, int64_t output_padding_len_, int64_t groups, int32_t benchmark, int32_t deterministic, int32_t cudnn_enabled, int32_t allow_tf32); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_conv_tbc_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle bias, int64_t pad); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_copy_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__copy_from_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle dst, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__copy_from_and_resize_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle dst); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_count_nonzero_dim_IntList_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_count_nonzero_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_affine_grid_generator_out(AtenTensorHandle out, AtenTensorHandle theta, 
int64_t N, int64_t C, int64_t H, int64_t W); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_affine_grid_generator_backward_out(AtenTensorHandle out, AtenTensorHandle grad, int64_t N, int64_t C, int64_t H, int64_t W); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_batch_norm_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double exponential_average_factor, double epsilon); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_batch_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle grad_output, AtenTensorHandle weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_var, double epsilon, AtenTensorHandle reserveSpace); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_convolution_transpose_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic, int32_t allow_tf32); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__mps_convolution_transpose_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mps_convolution_transpose_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle grad_output, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_convolution_relu_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_convolution_add_relu_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle z, double* alpha, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_grid_sampler_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle grid); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_cudnn_grid_sampler_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle grid, AtenTensorHandle grad_output); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__ctc_loss_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* 
target_lengths, int64_t target_lengths_len_, int64_t blank, int32_t zero_infinity); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__ctc_loss_Tensor_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle log_probs, AtenTensorHandle targets, AtenTensorHandle input_lengths, AtenTensorHandle target_lengths, int64_t blank, int32_t zero_infinity); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__ctc_loss_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle log_probs, AtenTensorHandle targets, const int64_t* input_lengths, int64_t input_lengths_len_, const int64_t* target_lengths, int64_t target_lengths_len_, AtenTensorHandle neg_log_likelihood, AtenTensorHandle log_alpha, int64_t blank, int32_t zero_infinity); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diag_embed_out(AtenTensorHandle out, AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diagonal_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t offset, int64_t dim1, int64_t dim2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_div_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_div_Scalar_mode_out(AtenTensorHandle out, AtenTensorHandle self, double other, const char** rounding_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_embedding_out(AtenTensorHandle out, AtenTensorHandle weight, AtenTensorHandle indices, int64_t padding_idx, int32_t scale_grad_by_freq, int32_t sparse); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_embedding_dense_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle indices, int64_t num_weights, int64_t padding_idx, int32_t scale_grad_by_freq); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_embedding_renorm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle indices, double max_norm, double norm_type); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_embedding_renorm(AtenTensorHandle self, AtenTensorHandle indices, double max_norm, double norm_type, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_forward_only_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle weight, AtenTensorHandle indices, AtenTensorHandle offsets, int32_t scale_grad_by_freq, int64_t mode, int32_t sparse, AtenTensorHandle* per_sample_weights, int32_t include_last_offset, int64_t padding_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_dense_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle indices, AtenTensorHandle offset2bag, AtenTensorHandle bag_size, AtenTensorHandle maximum_indices, int64_t num_weights, int32_t scale_grad_by_freq, int64_t mode, AtenTensorHandle* per_sample_weights, int64_t padding_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__embedding_bag_per_sample_weights_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle weight, AtenTensorHandle indices, 
AtenTensorHandle offsets, AtenTensorHandle offset2bag, int64_t mode, int64_t padding_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_empty_permuted_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, const int64_t* physical_layout, int64_t physical_layout_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_empty_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_empty_strided_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_full_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, double fill_value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_zeros_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_new_ones_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__empty_affine_quantized_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, double scale, int64_t zero_point, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__empty_per_channel_affine_quantized_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, AtenTensorHandle scales, AtenTensorHandle zero_points, int64_t axis, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__resize_output_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t device, int32_t device_index_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__resize_output(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t device, int32_t device_index_, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_empty_quantized_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, AtenTensorHandle qtensor, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_empty_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_empty_strided_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fill_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fill_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_floor_divide_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_full_like_out(AtenTensorHandle out, AtenTensorHandle self, double fill_value, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_from_file_out(AtenTensorHandle out, const char* filename, int32_t* shared, int64_t* size); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_grid_sampler_2d_out(AtenTensorHandle out, 
AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_grid_sampler_2d_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__grid_sampler_2d_cpu_fallback_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_grid_sampler_3d_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_grid_sampler_3d_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle grid, int64_t interpolation_mode, int64_t padding_mode, int32_t align_corners, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hann_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hann_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hamming_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hamming_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hamming_window_periodic_alpha_out(AtenTensorHandle out, int64_t window_length, int32_t periodic, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hamming_window_periodic_alpha_beta_out(AtenTensorHandle out, int64_t window_length, int32_t periodic, double alpha, double beta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kaiser_window_out(AtenTensorHandle out, int64_t window_length); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kaiser_window_periodic_out(AtenTensorHandle out, int64_t window_length, int32_t periodic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_kaiser_window_beta_out(AtenTensorHandle out, int64_t window_length, int32_t periodic, double beta); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_group_norm_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_group_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle rstd, AtenTensorHandle* weight, int64_t N, int64_t C, int64_t HxW, int64_t group, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_put_out(AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__index_put_impl_out(AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t 
accumulate, int32_t unsafe); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__index_put_impl(AtenTensorHandle self, const AtenTensorHandle** indices, int64_t indices_len_, AtenTensorHandle values, int32_t accumulate, int32_t unsafe, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_isnan_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_layer_norm_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, const int64_t* normalized_shape, int64_t normalized_shape_len_, AtenTensorHandle* weight, AtenTensorHandle* bias, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_layer_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_out, AtenTensorHandle input, const int64_t* normalized_shape, int64_t normalized_shape_len_, AtenTensorHandle mean, AtenTensorHandle rstd, AtenTensorHandle* weight, AtenTensorHandle* bias, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linear_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, AtenTensorHandle grad_output, AtenTensorHandle weight, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_linear_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_linear_backward_input_out(AtenTensorHandle out, const int64_t* input_size, int64_t input_size_len_, AtenTensorHandle grad_output, AtenTensorHandle weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_linear_backward_weights_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad_output, AtenTensorHandle input, AtenTensorHandle weight, int32_t bias_defined); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_linear_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, AtenTensorHandle grad_output, AtenTensorHandle weight, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_matmul_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad, AtenTensorHandle self, AtenTensorHandle other, const int32_t* mask, int64_t mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__aminmax_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__aminmax_dim_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, int64_t dim, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_max_pool2d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_max_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_max_pool2d_backward_out(AtenTensorHandle out, 
AtenTensorHandle grad_output, AtenTensorHandle output, AtenTensorHandle input, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_max_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_max_pool3d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, AtenTensorHandle input, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantized_max_pool1d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantized_max_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantized_max_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_, int32_t ceil_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_median_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_nanmedian_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__mps_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mps_convolution_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, AtenTensorHandle grad_output, AtenTensorHandle weight, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_rnn_layer_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle input, AtenTensorHandle weight0, AtenTensorHandle weight1, 
AtenTensorHandle weight2, AtenTensorHandle weight3, AtenTensorHandle hx_, AtenTensorHandle cx_, int32_t reverse, const int64_t* batch_sizes, int64_t batch_sizes_len_, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t has_biases, int32_t bidirectional, int32_t batch_first, int32_t train); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_rnn_layer_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle out5, AtenTensorHandle out6, AtenTensorHandle input, AtenTensorHandle weight1, AtenTensorHandle weight2, AtenTensorHandle weight3, AtenTensorHandle weight4, AtenTensorHandle hx_, AtenTensorHandle cx_tmp, AtenTensorHandle output, AtenTensorHandle hy_, AtenTensorHandle cy_, AtenTensorHandle* grad_output, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, int32_t reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t has_biases, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, int32_t batch_first, AtenTensorHandle workspace); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_batch_norm_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, int32_t training, double exponential_average_factor, double epsilon); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_batch_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle grad_output, AtenTensorHandle weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_var, double epsilon); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_convolution_transpose_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* output_padding, int64_t output_padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_depthwise_convolution_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, int32_t benchmark, int32_t deterministic); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_miopen_rnn_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle hx, AtenTensorHandle* cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cuda_miopen_rnn_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, const AtenTensorHandle* out3, int64_t out3_len_, AtenTensorHandle input, const AtenTensorHandle* weight, int64_t weight_len_, int64_t weight_stride0, AtenTensorHandle weight_buf, AtenTensorHandle hx, AtenTensorHandle* cx, AtenTensorHandle output, AtenTensorHandle* grad_output, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, int32_t batch_first, double dropout, int32_t train, int32_t bidirectional, const int64_t* batch_sizes, int64_t batch_sizes_len_, AtenTensorHandle* dropout_state, AtenTensorHandle reserve, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_sparse_matmul_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mul_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_batch_norm_legit_functional(AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, int32_t training, double momentum, double eps, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_batch_norm_legit_no_training_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* bias, AtenTensorHandle running_mean, AtenTensorHandle running_var, double momentum, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_stats_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, double eps); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_gather_stats_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum, double eps, int64_t count); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_gather_stats_with_counts_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum, double eps, AtenTensorHandle counts); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_batch_norm_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle* weight, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, AtenTensorHandle* save_mean, AtenTensorHandle* save_invstd, int32_t train, double eps, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_backward_reduce_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* weight, int32_t input_g, int32_t weight_g, int32_t bias_g); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_backward_elemt_out(AtenTensorHandle out, AtenTensorHandle grad_out, AtenTensorHandle input, AtenTensorHandle mean, AtenTensorHandle invstd, AtenTensorHandle* weight, AtenTensorHandle 
sum_dy, AtenTensorHandle sum_dy_xmu, AtenTensorHandle count); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_batch_norm_update_stats_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, AtenTensorHandle* running_mean, AtenTensorHandle* running_var, double momentum); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nnpack_spatial_convolution_out(AtenTensorHandle out, AtenTensorHandle input, AtenTensorHandle weight, AtenTensorHandle* bias, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ones_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__euclidean_dist_out(AtenTensorHandle out, AtenTensorHandle x1, AtenTensorHandle x2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cdist_forward_out(AtenTensorHandle out, AtenTensorHandle x1, AtenTensorHandle x2, double p, int64_t* compute_mode); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cdist_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle x1, AtenTensorHandle x2, double p, AtenTensorHandle cdist); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pdist_forward_out(AtenTensorHandle out, AtenTensorHandle self, double p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pdist_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle self, double p, AtenTensorHandle pdist); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_pixel_shuffle_out(AtenTensorHandle out, AtenTensorHandle self, int64_t upscale_factor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_pixel_unshuffle_out(AtenTensorHandle out, AtenTensorHandle self, int64_t downscale_factor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_channel_shuffle_out(AtenTensorHandle out, AtenTensorHandle self, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pin_memory_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* device, int32_t device_index_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_scalar_tensor_out(AtenTensorHandle out, double s); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rand_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_like_out(AtenTensorHandle out, AtenTensorHandle self, int64_t high, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randint_like_low_dtype_out(AtenTensorHandle out, AtenTensorHandle self, int64_t low, int64_t high, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_randn_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_repeat_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* repeats, int64_t repeats_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_repeat_interleave_Tensor_out(AtenTensorHandle out, AtenTensorHandle repeats, int64_t* output_size); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__mkldnn_reshape_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* shape, int64_t shape_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_relu_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_select_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t index); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_celu_out(AtenTensorHandle out, AtenTensorHandle self, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slice_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t start, int64_t end, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slice_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t* start, int64_t* end, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_select_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int64_t dim, int64_t index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diagonal_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int64_t offset, int64_t dim1, int64_t dim2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_as_strided_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unsafe_split_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, int64_t split_size, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unsafe_split_with_sizes_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, const int64_t* split_sizes, int64_t split_sizes_len_, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sum_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_std_mean_correction_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_prod_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__mkldnn_transpose_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim0, int64_t dim1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_flip_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dims, int64_t dims_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_roll_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* shifts, int64_t shifts_len_, const int64_t* dims, int64_t dims_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rot90_out(AtenTensorHandle out, AtenTensorHandle self, int64_t k, const int64_t* dims, int64_t dims_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__transform_bias_rescale_qkv_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle qkv, AtenTensorHandle qkv_bias, int64_t num_heads); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_tensor_from_mask_out(AtenTensorHandle out, AtenTensorHandle t, AtenTensorHandle mask, int32_t mask_check); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_from_padded_out(AtenTensorHandle out, AtenTensorHandle padded, AtenTensorHandle cpu_nested_shape_example, int32_t fuse_transform_0213); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_tensor_size_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_tensor_strides_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_tensor_storage_offsets_out(AtenTensorHandle out, AtenTensorHandle self); 
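The declarations above all follow the same C-ABI convention used throughout this shim header: each entry point returns an `AOTITorchError` status code, writes results into pre-allocated `AtenTensorHandle` out-parameters, passes optional arguments as pointers, and passes integer lists as a pointer plus an explicit `_len_` count. The sketch below is illustrative only and is not part of the header: the `check()` helper is a hypothetical wrapper, the comparison against `AOTI_TORCH_SUCCESS` assumes the shim's conventional success status, and creation/ownership of the `out` and `self` handles is left out.

```cpp
// Minimal sketch of calling two of the shim entry points declared above.
// Assumes the declaring shim header (and its AtenTensorHandle/AOTITorchError
// definitions) is already included; handle creation/cleanup is omitted.
#include <stdexcept>

// Hypothetical helper: map a shim status code to a C++ exception.
static void check(AOTITorchError err) {
  if (err != AOTI_TORCH_SUCCESS) {  // assumed success constant from the shim
    throw std::runtime_error("aoti_torch call failed");
  }
}

void run_relu(AtenTensorHandle out, AtenTensorHandle self) {
  // Plain out-variant op: the result is written into the pre-allocated `out`.
  check(aoti_torch_cuda_relu_out(out, self));
}

void run_roll(AtenTensorHandle out, AtenTensorHandle self) {
  // List arguments are passed as pointer + length pairs.
  const int64_t shifts[] = {1};
  const int64_t dims[] = {0};
  check(aoti_torch_cuda_roll_out(out, self, shifts, /*shifts_len_=*/1,
                                 dims, /*dims_len_=*/1));
}
```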
+AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_from_padded_and_nested_example_out(AtenTensorHandle out, AtenTensorHandle padded, AtenTensorHandle nt_example); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_view_from_buffer_copy_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle nested_size, AtenTensorHandle nested_strides, AtenTensorHandle offsets); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_view_from_jagged_copy_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle offsets, AtenTensorHandle dummy, AtenTensorHandle* lengths, int64_t ragged_idx); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_get_values_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__trilinear_out(AtenTensorHandle out, AtenTensorHandle i1, AtenTensorHandle i2, AtenTensorHandle i3, const int64_t* expand1, int64_t expand1_len_, const int64_t* expand2, int64_t expand2_len_, const int64_t* expand3, int64_t expand3_len_, const int64_t* sumdim, int64_t sumdim_len_, int64_t unroll_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__unique_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, int32_t sorted, int32_t return_inverse); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unique_dim_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, int64_t dim, int32_t sorted, int32_t return_inverse, int32_t return_counts); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unique_consecutive_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, int32_t return_inverse, int32_t return_counts, int64_t* dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unique_dim_consecutive_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, int64_t dim, int32_t return_inverse, int32_t return_counts); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__unique2_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle self, int32_t sorted, int32_t return_inverse, int32_t return_counts); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__unsafe_view_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_var_mean_correction_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, const int64_t** dim, int64_t dim_len_, double* correction, int32_t keepdim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__weight_norm_interface_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle v, AtenTensorHandle g, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__weight_norm_interface_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle grad_w, AtenTensorHandle saved_v, AtenTensorHandle saved_g, AtenTensorHandle saved_norms, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__efficientzerotensor_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_zeros_like_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__standard_gamma_grad_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle output); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__dirichlet_grad_out(AtenTensorHandle out, AtenTensorHandle x, AtenTensorHandle alpha, AtenTensorHandle 
total); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_norm_out(AtenTensorHandle out, AtenTensorHandle self, double p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_native_norm_ScalarOpt_dim_dtype_out(AtenTensorHandle out, AtenTensorHandle self, double* p, const int64_t* dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_sum_dim_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_sum_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_csr_sum_dim_dtype_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_csr_prod_dim_dtype_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_, int32_t keepdim, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_softmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t half_to_float); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_softmax_backward_data_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, int64_t dim, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_log_softmax_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int32_t half_to_float); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_log_softmax_backward_data_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, int64_t dim, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__spdiags_out(AtenTensorHandle out, AtenTensorHandle diagonals, AtenTensorHandle offsets, const int64_t* shape, int64_t shape_len_, int32_t* layout); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_norm_ScalarOpt_dtype_out(AtenTensorHandle out, AtenTensorHandle self, double* p, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_norm_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_clone_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_as_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle the_template, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_as(AtenTensorHandle self, AtenTensorHandle the_template, int32_t* memory_format, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_as_sparse_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle the_template); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_resize_as_sparse(AtenTensorHandle self, AtenTensorHandle the_template, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_zero_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_zero(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sub_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rsub_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_rsub_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_addmm_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mat1, AtenTensorHandle mat2, double beta, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_coo_tensor_size_out(AtenTensorHandle out, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_coo_tensor_with_dims_out(AtenTensorHandle out, int64_t sparse_dim, int64_t dense_dim, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_coo_tensor_with_dims_and_tensors_out(AtenTensorHandle out, int64_t sparse_dim, int64_t dense_dim, const int64_t* size, int64_t size_len_, AtenTensorHandle indices, AtenTensorHandle values, int32_t* is_coalesced); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_resize_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int64_t sparse_dim, int64_t dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_resize(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int64_t sparse_dim, int64_t dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_resize_and_clear_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int64_t sparse_dim, int64_t dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_resize_and_clear(AtenTensorHandle self, const int64_t* size, int64_t size_len_, int64_t sparse_dim, int64_t dense_dim, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_sparse_mask_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_mask_projection_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, int32_t accumulate_matches); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_dense_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* dtype, int32_t* masked_grad); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__coalesce_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__coalesced_out(AtenTensorHandle out, AtenTensorHandle self, int32_t coalesced); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__coalesced(AtenTensorHandle self, int32_t coalesced, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_copy_sparse_to_sparse_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_copy_sparse_to_sparse(AtenTensorHandle self, AtenTensorHandle src, int32_t non_blocking, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_sparse_dim_out(AtenTensorHandle out, AtenTensorHandle self, int64_t sparse_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* layout, const int64_t** blocksize, int64_t blocksize_len_, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_csr_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_csc_out(AtenTensorHandle out, AtenTensorHandle self, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_bsr_out(AtenTensorHandle out, AtenTensorHandle 
self, const int64_t* blocksize, int64_t blocksize_len_, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_sparse_bsc_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* blocksize, int64_t blocksize_len_, int64_t* dense_dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_to_mkldnn_out(AtenTensorHandle out, AtenTensorHandle self, int32_t* dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_reorder_conv2d_weight_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups, const int64_t** input_size, int64_t input_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_reorder_conv3d_weight_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* padding, int64_t padding_len_, const int64_t* stride, int64_t stride_len_, const int64_t* dilation, int64_t dilation_len_, int64_t groups); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_tensor_dynamic_out(AtenTensorHandle out, AtenTensorHandle self, int32_t dtype, int32_t reduce_range); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_tensor_out(AtenTensorHandle out, AtenTensorHandle self, double scale, int64_t zero_point, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_tensor_tensor_qparams_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_tensor_tensors_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* tensors, int64_t tensors_len_, AtenTensorHandle scales, AtenTensorHandle zero_points, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_quantize_per_channel_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scales, AtenTensorHandle zero_points, int64_t axis, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_dequantize_self_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_dequantize_tensors_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* tensors, int64_t tensors_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_q_per_channel_scales_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_q_per_channel_zero_points_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_int_repr_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__make_per_tensor_quantized_tensor_out(AtenTensorHandle out, AtenTensorHandle self, double scale, int64_t zero_point); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__make_per_channel_quantized_tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fake_quantize_per_tensor_affine_cachemask_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, AtenTensorHandle fake_quant_enabled, int64_t quant_min, 
int64_t quant_max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fake_quantize_learnable_per_tensor_affine_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_fake_quantize_per_channel_affine_cachemask_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fake_quantize_learnable_per_channel_affine_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle scale, AtenTensorHandle zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_moving_avg_obs_fq_helper_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_moving_avg_obs_fq_helper_functional(AtenTensorHandle self, AtenTensorHandle observer_on, AtenTensorHandle fake_quant_on, AtenTensorHandle running_min, AtenTensorHandle running_max, AtenTensorHandle scale, AtenTensorHandle zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int32_t per_row_fake_quant, int32_t symmetric_quant, AtenTensorHandle* ret0, AtenTensorHandle* ret1, AtenTensorHandle* ret2, AtenTensorHandle* ret3, AtenTensorHandle* ret4, AtenTensorHandle* ret5); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__to_copy_out(AtenTensorHandle out, AtenTensorHandle self, int32_t non_blocking, int32_t* memory_format); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__lstm_mps_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle out5, AtenTensorHandle input, const AtenTensorHandle* hx, int64_t hx_len_, const AtenTensorHandle* params, int64_t params_len_, int32_t has_biases, int64_t num_layers, double dropout, int32_t train, int32_t bidirectional, int32_t batch_first); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_lstm_mps_backward_out(AtenTensorHandle out0, const AtenTensorHandle* out1, int64_t out1_len_, const AtenTensorHandle* out2, int64_t out2_len_, AtenTensorHandle* grad_y, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, AtenTensorHandle z_state, AtenTensorHandle cell_state_fwd, AtenTensorHandle input, AtenTensorHandle layersOutputs, const AtenTensorHandle* hx, int64_t hx_len_, const AtenTensorHandle* params, int64_t params_len_, int32_t has_biases, int64_t num_layers, double dropout, int32_t train, int32_t bidirectional, int32_t batch_first); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_lstm_cell_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle input_gates, AtenTensorHandle hidden_gates, AtenTensorHandle cx, AtenTensorHandle* input_bias, AtenTensorHandle* hidden_bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_lstm_cell_backward_impl_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle* grad_hy, AtenTensorHandle* grad_cy, 
AtenTensorHandle cx, AtenTensorHandle cy, AtenTensorHandle workspace, int32_t has_bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_gru_cell_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input_gates, AtenTensorHandle hidden_gates, AtenTensorHandle hx, AtenTensorHandle* input_bias, AtenTensorHandle* hidden_bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_gru_cell_backward_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle out3, AtenTensorHandle out4, AtenTensorHandle grad_hy, AtenTensorHandle workspace, int32_t has_bias); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__pack_padded_sequence_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle input, AtenTensorHandle lengths, int32_t batch_first); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_set_source_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle source); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_set_source_Tensor(AtenTensorHandle self, AtenTensorHandle source, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_set_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_set(AtenTensorHandle self, AtenTensorHandle* ret0); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_lift_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_lift_fresh_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_fill_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_fill_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_masked_scatter_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, AtenTensorHandle source); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__masked_softmax_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle mask, int64_t* dim, int64_t* mask_type); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__masked_softmax_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle output, AtenTensorHandle mask, int64_t* dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_put_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle index, AtenTensorHandle source, int32_t accumulate); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_fill_int_Scalar_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_index_fill_int_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, AtenTensorHandle index, AtenTensorHandle value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bitwise_and_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bitwise_or_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bitwise_xor_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda___lshift___Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda___lshift___Tensor_out(AtenTensorHandle 
out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bitwise_left_shift_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda___rshift___Scalar_out(AtenTensorHandle out, AtenTensorHandle self, double other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda___rshift___Tensor_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bitwise_right_shift_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_tril_indices_out(AtenTensorHandle out, int64_t row, int64_t col, int64_t offset); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_triu_indices_out(AtenTensorHandle out, int64_t row, int64_t col, int64_t offset); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_trace_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__cholesky_solve_helper_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle A, int32_t upper); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_dist_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle other, double p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__histogramdd_bin_edges_out(const AtenTensorHandle* out, int64_t out_len_, AtenTensorHandle self, const int64_t* bins, int64_t bins_len_, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__histogramdd_from_bin_cts_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* bins, int64_t bins_len_, const double** range, int64_t range_len_, AtenTensorHandle* weight, int32_t density); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__histogramdd_from_bin_tensors_out(AtenTensorHandle out, AtenTensorHandle self, const AtenTensorHandle* bins, int64_t bins_len_, AtenTensorHandle* weight, int32_t density); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_remainder_Scalar_Tensor_out(AtenTensorHandle out, double self, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_argsort_stable_out(AtenTensorHandle out, AtenTensorHandle self, int32_t stable, int64_t dim, int32_t descending); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unfold_backward_out(AtenTensorHandle out, AtenTensorHandle grad_in, const int64_t* input_sizes, int64_t input_sizes_len_, int64_t dim, int64_t size, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__amp_foreach_non_finite_check_and_unscale_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle found_inf, AtenTensorHandle inv_scale); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__amp_update_scale_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle growth_tracker, AtenTensorHandle found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__amp_update_scale(AtenTensorHandle self, AtenTensorHandle growth_tracker, AtenTensorHandle found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, AtenTensorHandle* ret0, AtenTensorHandle* ret1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_add_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT 
AOTITorchError aoti_torch_cuda__foreach_add_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_add_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_add_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sub_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sub_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_, double alpha); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sub_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_mul_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_mul_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_mul_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_mul_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_div_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_div_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_div_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_div_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, AtenTensorHandle other); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_max_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_max_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_max_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t 
scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_min_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_min_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_clamp_min_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_maximum_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_maximum_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_maximum_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_minimum_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double scalar); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_minimum_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* other, int64_t other_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_minimum_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcdiv_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcdiv_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcdiv_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, AtenTensorHandle scalars); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcmul_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, double value); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_addcmul_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, const double* scalars, int64_t scalars_len_); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda__foreach_addcmul_Tensor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensor1, int64_t tensor1_len_, const AtenTensorHandle* tensor2, int64_t tensor2_len_, AtenTensorHandle scalars); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_abs_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_acos_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_asin_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_atan_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_ceil_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_cos_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_cosh_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_erf_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_erfc_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_exp_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_expm1_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_floor_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_frac_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_lerp_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensors1, int64_t tensors1_len_, const AtenTensorHandle* weights, int64_t weights_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_lerp_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* tensors1, int64_t tensors1_len_, double weight); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_lgamma_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_log_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_log10_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_log1p_out(const AtenTensorHandle* out, int64_t 
out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_log2_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_neg_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_norm_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double ord); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_pow_List_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* exponent, int64_t exponent_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_pow_Scalar_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, double exponent); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_pow_ScalarList_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const double* exponent, int64_t exponent_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_reciprocal_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_round_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sigmoid_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sign_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sin_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sinh_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_sqrt_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_tan_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_tanh_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_trunc_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_zero_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foreach_copy_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* src, int64_t src_len_, int32_t non_blocking); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_bucketize_Scalar_out(AtenTensorHandle out, double self, AtenTensorHandle boundaries, int32_t out_int32, int32_t right); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_glu_jvp_out(AtenTensorHandle out, AtenTensorHandle glu, 
AtenTensorHandle x, AtenTensorHandle dx, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_glu_backward_jvp_out(AtenTensorHandle out, AtenTensorHandle grad_x, AtenTensorHandle grad_glu, AtenTensorHandle x, AtenTensorHandle dgrad_glu, AtenTensorHandle dx, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_hardswish_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_rrelu_with_noise_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle noise, double lower, double upper, int32_t training, int32_t self_is_result); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_mkldnn_adaptive_avg_pool2d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool2d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool2d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool3d_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__adaptive_avg_pool3d_backward_out(AtenTensorHandle out, AtenTensorHandle grad_output, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__slow_conv2d_backward_output_mask_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle out2, AtenTensorHandle grad_output, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int32_t* output_mask, int64_t output_mask_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_conv_depthwise3d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slow_conv_dilated2d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_slow_conv_dilated3d_out(AtenTensorHandle out, AtenTensorHandle self, AtenTensorHandle weight, const int64_t* kernel_size, int64_t kernel_size_len_, AtenTensorHandle* bias, const int64_t* stride, int64_t stride_len_, const int64_t* padding, int64_t padding_len_, const int64_t* dilation, int64_t dilation_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_isinf_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_linalg_matrix_exp_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_optional_intlist_out(AtenTensorHandle out, AtenTensorHandle values, const int64_t** addends, int64_t addends_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_optional_filled_intlist_out(AtenTensorHandle out, AtenTensorHandle 
values, const int64_t** addends, int64_t addends_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_optional_floatlist_out(AtenTensorHandle out, AtenTensorHandle values, const double** addends, int64_t addends_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_warn_in_autograd_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_autograd_multiple_dispatch_fullcoverage_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__test_autograd_multiple_dispatch_view_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_segment_reduce_out(AtenTensorHandle out, AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* indices, AtenTensorHandle* offsets, int64_t axis, int32_t unsafe, double* initial); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__segment_reduce_backward_out(AtenTensorHandle out, AtenTensorHandle grad, AtenTensorHandle output, AtenTensorHandle data, const char* reduce, AtenTensorHandle* lengths, AtenTensorHandle* offsets, int64_t axis, double* initial); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__nested_tensor_from_tensor_list_out(AtenTensorHandle out, const AtenTensorHandle* list, int64_t list_len_, int32_t* dtype, int32_t* layout, int32_t* device, int32_t device_index_, int32_t* pin_memory); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fw_primal_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t level); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__make_dual_copy_out(AtenTensorHandle out, AtenTensorHandle primal, AtenTensorHandle tangent, int64_t level); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_as_real_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_as_complex_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__conj_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__neg_view_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_as_strided_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_, int64_t* storage_offset); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__sparse_broadcast_to_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_diagonal_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t offset, int64_t dim1, int64_t dim2); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_expand_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, int32_t implicit); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_permute_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dims, int64_t dims_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__reshape_alias_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_, const int64_t* stride, int64_t stride_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_select_copy_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int64_t index); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_detach_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError 
aoti_torch_cuda_slice_copy_Tensor_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim, int64_t* start, int64_t* end, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_copy_dim_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_squeeze_copy_dims_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* dim, int64_t dim_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_t_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_transpose_copy_int_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim0, int64_t dim1); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unsqueeze_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dim); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__values_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_values_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_crow_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_col_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_ccol_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_row_indices_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_copy_out(AtenTensorHandle out, AtenTensorHandle self, const int64_t* size, int64_t size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_view_copy_dtype_out(AtenTensorHandle out, AtenTensorHandle self, int32_t dtype); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_unfold_copy_out(AtenTensorHandle out, AtenTensorHandle self, int64_t dimension, int64_t size, int64_t step); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_alias_copy_out(AtenTensorHandle out, AtenTensorHandle self); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda_to_padded_tensor_out(AtenTensorHandle out, AtenTensorHandle self, double padding, const int64_t** output_size, int64_t output_size_len_); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__transformer_encoder_layer_fwd_out(AtenTensorHandle out, AtenTensorHandle src, int64_t embed_dim, int64_t num_heads, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, int32_t use_gelu, int32_t norm_first, double eps, AtenTensorHandle norm_weight_1, AtenTensorHandle norm_bias_1, AtenTensorHandle norm_weight_2, AtenTensorHandle norm_bias_2, AtenTensorHandle ffn_weight_1, AtenTensorHandle ffn_bias_1, AtenTensorHandle ffn_weight_2, AtenTensorHandle ffn_bias_2, AtenTensorHandle* mask, int64_t* mask_type); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__native_multi_head_attention_out(AtenTensorHandle out0, AtenTensorHandle out1, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, int64_t embed_dim, int64_t num_head, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, 
AtenTensorHandle* mask, int32_t need_weights, int32_t average_attn_weights, int64_t* mask_type); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__triton_scaled_dot_attention_out(AtenTensorHandle out, AtenTensorHandle q, AtenTensorHandle k, AtenTensorHandle v, double dropout_p); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__triton_multi_head_attention_out(AtenTensorHandle out, AtenTensorHandle query, AtenTensorHandle key, AtenTensorHandle value, int64_t embed_dim, int64_t num_head, AtenTensorHandle qkv_weight, AtenTensorHandle qkv_bias, AtenTensorHandle proj_weight, AtenTensorHandle proj_bias, AtenTensorHandle* mask); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__foobar_out(AtenTensorHandle out, AtenTensorHandle self, int32_t arg1, int32_t arg2, int32_t arg3); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_adam_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, double lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_adam_tensor_lr_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, AtenTensorHandle lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_adamw_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, double lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_adamw_tensor_lr_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* exp_avgs, int64_t exp_avgs_len_, const AtenTensorHandle* exp_avg_sqs, int64_t exp_avg_sqs_len_, const AtenTensorHandle* max_exp_avg_sqs, int64_t max_exp_avg_sqs_len_, const AtenTensorHandle* state_steps, int64_t state_steps_len_, AtenTensorHandle lr, double beta1, double beta2, double weight_decay, double eps, int32_t amsgrad, int32_t maximize, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_sgd_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, 
int64_t grads_len_, const AtenTensorHandle* momentum_buffer_list, int64_t momentum_buffer_list_len_, double weight_decay, double momentum, double lr, double dampening, int32_t nesterov, int32_t maximize, int32_t is_first_step, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); +AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__fused_sgd_tensor_lr_out(const AtenTensorHandle* out, int64_t out_len_, const AtenTensorHandle* self, int64_t self_len_, const AtenTensorHandle* grads, int64_t grads_len_, const AtenTensorHandle* momentum_buffer_list, int64_t momentum_buffer_list_len_, double weight_decay, double momentum, AtenTensorHandle lr, double dampening, int32_t nesterov, int32_t maximize, int32_t is_first_step, AtenTensorHandle* grad_scale, AtenTensorHandle* found_inf); + +#ifdef __cplusplus +} // extern "C" +#endif + diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/proxy_executor.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/proxy_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..4446640d2a7055654b76f168c587b9370393ad4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/proxy_executor.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace aot_inductor { + +class ProxyExecutor { + public: + ProxyExecutor() {} + virtual ~ProxyExecutor() {} + + virtual void call_function( + int extern_node_index, + int num_ints, + int64_t* flatten_int_args, + int num_tensors, + AtenTensorHandle* flatten_tensor_args) = 0; +}; + +} // namespace aot_inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/tensor_converter.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/tensor_converter.h new file mode 100644 index 0000000000000000000000000000000000000000..4bd65bdd705fd1af914c82abd1a4dc6fc7ea8dc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/tensor_converter.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace aot_inductor { + +// Functions declared here are not meant to be called from the AOTInductor +// generated model.so + +// No ownership transfer, just pointer type conversion +TORCH_API at::Tensor* tensor_handle_to_tensor_pointer(AtenTensorHandle handle); + +// No ownership transfer, just pointer type conversion +TORCH_API AtenTensorHandle tensor_pointer_to_tensor_handle(at::Tensor* tensor); + +TORCH_API AtenTensorHandle new_tensor_handle(at::Tensor&& tensor); + +// unsafe_alloc_new_handles_from_tensors is used for allocating new aten +// tensor objects and return them as a vector of AtenTensorHandle (raw +// pointers), and those pointers will be stolen by model.so. +TORCH_API std::vector unsafe_alloc_new_handles_from_tensors( + std::vector& tensors); + +// alloc_tensors_by_stealing_from_handles is used for creating a vector of aten +// tensors by stealing from an array of handles. Only the handles are stolen, +// and the array itself is borrowed. 
+// +// WARNING: Can NOT be called in model.so unless in the non-ABI-compatible mode +TORCH_API std::vector alloc_tensors_by_stealing_from_handles( + AtenTensorHandle* handles, + size_t length); + +} // namespace aot_inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..a5f891c6382037cff0a0729d31ea285bf682daef --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/utils.h @@ -0,0 +1,142 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE(...) \ + try { \ + __VA_ARGS__ \ + } catch (const std::exception& e) { \ + LOG(ERROR) << "Exception in aoti_torch: " << e.what(); \ + return AOTI_TORCH_FAILURE; \ + } catch (...) { \ + LOG(ERROR) << "Exception in aoti_torch: UNKNOWN"; \ + return AOTI_TORCH_FAILURE; \ + } \ + return AOTI_TORCH_SUCCESS; + +namespace torch::aot_inductor { + +// utility functions to convert a pointer to an optional value +template +inline c10::optional pointer_to_optional(T* ptr) { + return ptr ? c10::make_optional(*ptr) : c10::nullopt; +} + +template >> +inline c10::optional pointer_to_optional(U* ptr) { + return ptr ? c10::make_optional(T(*ptr)) : c10::nullopt; +} + +template <> +inline c10::optional pointer_to_optional(AtenTensorHandle* ptr) { + return ptr ? c10::make_optional(*tensor_handle_to_tensor_pointer(*ptr)) + : c10::nullopt; +} + +template <> +inline c10::optional pointer_to_optional( + const AtenTensorHandle* ptr) { + return ptr ? c10::make_optional(*tensor_handle_to_tensor_pointer(*ptr)) + : c10::nullopt; +} + +inline c10::optional pointer_to_optional_device( + int32_t* device_type, + int32_t device_index) { + return device_type ? 
c10::make_optional(c10::Device( + static_cast(*device_type), + static_cast(device_index))) + : c10::nullopt; +} + +// utility functions to convert a pointer to a list +template +struct is_optional : std::false_type {}; +template +struct is_optional> : std::true_type {}; + +template +inline c10::ArrayRef pointer_to_list(T* ptr, int64_t len) { + return c10::ArrayRef(ptr, len); +} + +template < + class T, + class U, + typename = std::enable_if_t>, + typename = std::enable_if_t::value>> +inline std::vector pointer_to_list(U* ptr, int64_t len) { + // std::vector will be implicitly converted to c10::ArrayRef at the call + // site + std::vector result; + result.reserve(len); + for (int64_t i = 0; i < len; i++) { + result.emplace_back(T(ptr[i])); + } + return result; +} + +template ::value>> +inline std::vector pointer_to_list(U** ptr, int64_t len) { + // Here U** denotes a list of optional arguments + // std::vector will be implicitly converted to c10::ArrayRef at the call + // site + std::vector result; + result.reserve(len); + for (int64_t i = 0; i < len; i++) { + result.emplace_back(pointer_to_optional(ptr[i])); + } + return result; +} + +template <> +inline std::vector pointer_to_list( + const AtenTensorHandle* ptr, + int64_t len) { + std::vector result; + result.reserve(len); + for (int64_t i = 0; i < len; i++) { + result.emplace_back(*tensor_handle_to_tensor_pointer(*ptr)); + } + return result; +} + +template <> +inline std::vector> pointer_to_list( + const AtenTensorHandle** ptr, + int64_t len) { + std::vector> result; + result.reserve(len); + for (int64_t i = 0; i < len; i++) { + result.emplace_back(pointer_to_optional(ptr[i])); + } + return result; +} + +template +inline std::array pointer_to_list(const int32_t* ptr) { + std::array result; + std::copy(ptr, ptr + N, result.begin()); + return result; +} + +// utility functions to convert a pointer to a list of optional values +template +inline c10::optional> pointer_to_optional_list( + U** ptr, + int64_t len) { + return ptr + ? 
c10::make_optional>(pointer_to_list(*ptr, len)) + : c10::nullopt; +} + +} // namespace torch::aot_inductor diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/inductor_ops.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/inductor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..817ff82e030d704e1e7030c9a18d5cd936994793 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/inductor_ops.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace torch { +namespace inductor { + +TORCH_API at::Tensor _mm_plus_mm( + const at::Tensor& a, + const at::Tensor& b, + const at::Tensor& c, + const at::Tensor& d, + at::Tensor& out); + +TORCH_API at::Tensor _alloc_from_pool( + const at::Tensor& self, + int64_t offset_bytes, + at::ScalarType dtype, + at::IntArrayRef size, + at::IntArrayRef stride); + +// Similar to as_strided with the following differences +// - offset is added to the existing offset (rather than replacing it) +// - view tracking is disabled similar to unsafe_view +TORCH_API at::Tensor _reinterpret_tensor( + const at::Tensor& self, + at::IntArrayRef size, + at::IntArrayRef stride, + int64_t offset_increment = 0); + +} // namespace inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_compat.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..73b991cf3fbfc35dfeb1dce99755d11e5ed5dceb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_compat.h @@ -0,0 +1,38 @@ +#ifndef PYTHON_COMPAT +#define PYTHON_COMPAT + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// PyTorch-only compat functions + +#define IS_PYTHON_3_11_PLUS PY_VERSION_HEX >= 0x030B00C1 +#define IS_PYTHON_3_12_PLUS PY_VERSION_HEX >= 0x030C0000 + +PYCAPI_COMPAT_STATIC_INLINE(int) +PyCode_GetNCellvars(PyCodeObject* code) { +// gh-26364 added co_ncellvars to Python 3.11.0rc1 +#if IS_PYTHON_3_11_PLUS + return code->co_ncellvars; +#else + return PyTuple_GET_SIZE(code->co_cellvars); +#endif +} + +PYCAPI_COMPAT_STATIC_INLINE(int) +PyCode_GetNFreevars(PyCodeObject* code) { +// gh-26364 added co_nfreevars to Python 3.11.0rc1 +#if IS_PYTHON_3_11_PLUS + return code->co_nfreevars; +#else + return PyTuple_GET_SIZE(code->co_freevars); +#endif +} + +#ifdef __cplusplus +} +#endif +#endif // PYTHON_COMPAT diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_torch_function_mode.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_torch_function_mode.h new file mode 100644 index 0000000000000000000000000000000000000000..f6652dfd93084c07395f50ca6c96823c364eff5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_torch_function_mode.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +namespace torch { +namespace overrides { + +struct StashTorchFunctionModeGuard { + StashTorchFunctionModeGuard() { + cur_mode_ = at::impl::PythonTorchFunctionTLS::pop_stack(); + } + ~StashTorchFunctionModeGuard() { + at::impl::PythonTorchFunctionTLS::push_onto_stack(cur_mode_); + } + + const std::shared_ptr& get_cur_mode() { + return cur_mode_; + } + + private: + std::shared_ptr cur_mode_; +}; + +} // namespace overrides +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pythoncapi_compat.h 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pythoncapi_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..05072be63ad18fc9e0d350fc6fedf4bf85261b3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pythoncapi_compat.h @@ -0,0 +1,716 @@ +// Header file providing new C API functions to old Python versions. +// +// File distributed under the Zero Clause BSD (0BSD) license. +// Copyright Contributors to the pythoncapi_compat project. +// +// Homepage: +// https://github.com/python/pythoncapi_compat +// +// Latest version: +// https://raw.githubusercontent.com/python/pythoncapi_compat/master/pythoncapi_compat.h +// +// SPDX-License-Identifier: 0BSD + +#ifndef PYTHONCAPI_COMPAT +#define PYTHONCAPI_COMPAT + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "frameobject.h" // PyFrameObject, PyFrame_GetBack() + + +// Compatibility with Visual Studio 2013 and older which don't support +// the inline keyword in C (only in C++): use __inline instead. +#if (defined(_MSC_VER) && _MSC_VER < 1900 \ + && !defined(__cplusplus) && !defined(inline)) +# define PYCAPI_COMPAT_STATIC_INLINE(TYPE) static __inline TYPE +#else +# define PYCAPI_COMPAT_STATIC_INLINE(TYPE) static inline TYPE +#endif + + +#ifndef _Py_CAST +# define _Py_CAST(type, expr) ((type)(expr)) +#endif + +// On C++11 and newer, _Py_NULL is defined as nullptr on C++11, +// otherwise it is defined as NULL. +#ifndef _Py_NULL +# if defined(__cplusplus) && __cplusplus >= 201103 +# define _Py_NULL nullptr +# else +# define _Py_NULL NULL +# endif +#endif + +// Cast argument to PyObject* type. +#ifndef _PyObject_CAST +# define _PyObject_CAST(op) _Py_CAST(PyObject*, op) +#endif + + +// bpo-42262 added Py_NewRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_NewRef) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +_Py_NewRef(PyObject *obj) +{ + Py_INCREF(obj); + return obj; +} +#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj)) +#endif + + +// bpo-42262 added Py_XNewRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_XNewRef) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +_Py_XNewRef(PyObject *obj) +{ + Py_XINCREF(obj); + return obj; +} +#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj)) +#endif + + +// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT) +PYCAPI_COMPAT_STATIC_INLINE(void) +_Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt) +{ + ob->ob_refcnt = refcnt; +} +#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT(_PyObject_CAST(ob), refcnt) +#endif + + +// Py_SETREF() and Py_XSETREF() were added to Python 3.5.2. +// It is excluded from the limited C API. +#if (PY_VERSION_HEX < 0x03050200 && !defined(Py_SETREF)) && !defined(Py_LIMITED_API) +#define Py_SETREF(dst, src) \ + do { \ + PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \ + PyObject *_tmp_dst = (*_tmp_dst_ptr); \ + *_tmp_dst_ptr = _PyObject_CAST(src); \ + Py_DECREF(_tmp_dst); \ + } while (0) + +#define Py_XSETREF(dst, src) \ + do { \ + PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \ + PyObject *_tmp_dst = (*_tmp_dst_ptr); \ + *_tmp_dst_ptr = _PyObject_CAST(src); \ + Py_XDECREF(_tmp_dst); \ + } while (0) +#endif + + +// bpo-43753 added Py_Is(), Py_IsNone(), Py_IsTrue() and Py_IsFalse() +// to Python 3.10.0b1. 
+#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_Is) +# define Py_Is(x, y) ((x) == (y)) +#endif +#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsNone) +# define Py_IsNone(x) Py_Is(x, Py_None) +#endif +#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsTrue) +# define Py_IsTrue(x) Py_Is(x, Py_True) +#endif +#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsFalse) +# define Py_IsFalse(x) Py_Is(x, Py_False) +#endif + + +// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE) +PYCAPI_COMPAT_STATIC_INLINE(void) +_Py_SET_TYPE(PyObject *ob, PyTypeObject *type) +{ + ob->ob_type = type; +} +#define Py_SET_TYPE(ob, type) _Py_SET_TYPE(_PyObject_CAST(ob), type) +#endif + + +// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE) +PYCAPI_COMPAT_STATIC_INLINE(void) +_Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size) +{ + ob->ob_size = size; +} +#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size) +#endif + + +// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 || defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyCodeObject*) +PyFrame_GetCode(PyFrameObject *frame) +{ + assert(frame != _Py_NULL); + assert(frame->f_code != _Py_NULL); + return _Py_CAST(PyCodeObject*, Py_NewRef(frame->f_code)); +} +#endif + +PYCAPI_COMPAT_STATIC_INLINE(PyCodeObject*) +_PyFrame_GetCodeBorrow(PyFrameObject *frame) +{ + PyCodeObject *code = PyFrame_GetCode(frame); + Py_DECREF(code); + return code; +} + + +// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyFrameObject*) +PyFrame_GetBack(PyFrameObject *frame) +{ + assert(frame != _Py_NULL); + return _Py_CAST(PyFrameObject*, Py_XNewRef(frame->f_back)); +} +#endif + +#if !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyFrameObject*) +_PyFrame_GetBackBorrow(PyFrameObject *frame) +{ + PyFrameObject *back = PyFrame_GetBack(frame); + Py_XDECREF(back); + return back; +} +#endif + + +// bpo-40421 added PyFrame_GetLocals() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetLocals(PyFrameObject *frame) +{ +#if PY_VERSION_HEX >= 0x030400B1 + if (PyFrame_FastToLocalsWithError(frame) < 0) { + return NULL; + } +#else + PyFrame_FastToLocals(frame); +#endif + return Py_NewRef(frame->f_locals); +} +#endif + + +// bpo-40421 added PyFrame_GetGlobals() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetGlobals(PyFrameObject *frame) +{ + return Py_NewRef(frame->f_globals); +} +#endif + + +// bpo-40421 added PyFrame_GetBuiltins() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetBuiltins(PyFrameObject *frame) +{ + return Py_NewRef(frame->f_builtins); +} +#endif + + +// bpo-40421 added PyFrame_GetLasti() to Python 3.11.0b1 +#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyFrame_GetLasti(PyFrameObject *frame) +{ +#if PY_VERSION_HEX >= 0x030A00A7 + // bpo-27129: Since Python 3.10.0a7, f_lasti is an instruction offset, + // not a bytes offset anymore. Python uses 16-bit "wordcode" (2 bytes) + // instructions. 
+ if (frame->f_lasti < 0) { + return -1; + } + return frame->f_lasti * 2; +#else + return frame->f_lasti; +#endif +} +#endif + + +// gh-91248 added PyFrame_GetVar() to Python 3.12.0a2 +#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetVar(PyFrameObject *frame, PyObject *name) +{ + PyObject *locals, *value; + + locals = PyFrame_GetLocals(frame); + if (locals == NULL) { + return NULL; + } +#if PY_VERSION_HEX >= 0x03000000 + value = PyDict_GetItemWithError(locals, name); +#else + value = PyDict_GetItem(locals, name); +#endif + Py_DECREF(locals); + + if (value == NULL) { + if (PyErr_Occurred()) { + return NULL; + } +#if PY_VERSION_HEX >= 0x03000000 + PyErr_Format(PyExc_NameError, "variable %R does not exist", name); +#else + PyErr_SetString(PyExc_NameError, "variable does not exist"); +#endif + return NULL; + } + return Py_NewRef(value); +} +#endif + + +// gh-91248 added PyFrame_GetVarString() to Python 3.12.0a2 +#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetVarString(PyFrameObject *frame, const char *name) +{ + PyObject *name_obj, *value; + name_obj = PyUnicode_FromString(name); + if (name_obj == NULL) { + return NULL; + } + value = PyFrame_GetVar(frame, name_obj); + Py_DECREF(name_obj); + return value; +} +#endif + + +// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyInterpreterState *) +PyThreadState_GetInterpreter(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return tstate->interp; +} +#endif + + +// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyFrameObject*) +PyThreadState_GetFrame(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return _Py_CAST(PyFrameObject *, Py_XNewRef(tstate->frame)); +} +#endif + +#if !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyFrameObject*) +_PyThreadState_GetFrameBorrow(PyThreadState *tstate) +{ + PyFrameObject *frame = PyThreadState_GetFrame(tstate); + Py_XDECREF(frame); + return frame; +} +#endif + + +// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyInterpreterState*) +PyInterpreterState_Get(void) +{ + PyThreadState *tstate; + PyInterpreterState *interp; + + tstate = PyThreadState_GET(); + if (tstate == _Py_NULL) { + Py_FatalError("GIL released (tstate is NULL)"); + } + interp = tstate->interp; + if (interp == _Py_NULL) { + Py_FatalError("no current interpreter"); + } + return interp; +} +#endif + + +// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a6 +#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(uint64_t) +PyThreadState_GetID(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return tstate->id; +} +#endif + +// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(void) +PyThreadState_EnterTracing(PyThreadState *tstate) +{ + tstate->tracing++; +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = 0; +#else + tstate->use_tracing = 0; +#endif +} +#endif + +// bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) 
+PYCAPI_COMPAT_STATIC_INLINE(void) +PyThreadState_LeaveTracing(PyThreadState *tstate) +{ + int use_tracing = (tstate->c_tracefunc != _Py_NULL + || tstate->c_profilefunc != _Py_NULL); + tstate->tracing--; +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = use_tracing; +#else + tstate->use_tracing = use_tracing; +#endif +} +#endif + + +// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1 +// PyObject_CallNoArgs() added to PyPy 3.9.16-v7.3.11 +#if !defined(PyObject_CallNoArgs) && PY_VERSION_HEX < 0x030900A1 +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_CallNoArgs(PyObject *func) +{ + return PyObject_CallFunctionObjArgs(func, NULL); +} + +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_CallMethodNoArgs(PyObject *obj, PyObject *name) +{ + return PyObject_CallMethodObjArgs(obj, name, NULL); +} +#endif + + +// bpo-39245 made PyObject_CallOneArg() public (previously called +// _PyObject_CallOneArg) in Python 3.9.0a4 +// PyObject_CallOneArg() added to PyPy 3.9.16-v7.3.11 +#if !defined(PyObject_CallOneArg) && PY_VERSION_HEX < 0x030900A4 +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_CallOneArg(PyObject *func, PyObject *arg) +{ + return PyObject_CallFunctionObjArgs(func, arg, NULL); +} + +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_CallMethodOneArg(PyObject *obj, PyObject *name, PyObject *arg) +{ + return PyObject_CallMethodObjArgs(obj, name, arg, NULL); +} +#endif + + +// bpo-1635741 added PyModule_AddObjectRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 +PYCAPI_COMPAT_STATIC_INLINE(int) +PyModule_AddObjectRef(PyObject *module, const char *name, PyObject *value) +{ + int res; + Py_XINCREF(value); + res = PyModule_AddObject(module, name, value); + if (res < 0) { + Py_XDECREF(value); + } + return res; +} +#endif + + +// bpo-40024 added PyModule_AddType() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 +PYCAPI_COMPAT_STATIC_INLINE(int) +PyModule_AddType(PyObject *module, PyTypeObject *type) +{ + const char *name, *dot; + + if (PyType_Ready(type) < 0) { + return -1; + } + + // inline _PyType_Name() + name = type->tp_name; + assert(name != _Py_NULL); + dot = strrchr(name, '.'); + if (dot != _Py_NULL) { + name = dot + 1; + } + + return PyModule_AddObjectRef(module, name, _PyObject_CAST(type)); +} +#endif + + +// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6. +// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2. +#if PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyObject_GC_IsTracked(PyObject* obj) +{ + return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)); +} +#endif + +// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6. +// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final. +#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyObject_GC_IsFinalized(PyObject *obj) +{ + PyGC_Head *gc = _Py_CAST(PyGC_Head*, obj) - 1; + return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(gc)); +} +#endif + + +// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE) +PYCAPI_COMPAT_STATIC_INLINE(int) +_Py_IS_TYPE(PyObject *ob, PyTypeObject *type) { + return Py_TYPE(ob) == type; +} +#define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST(ob), type) +#endif + + +// bpo-46906 added PyFloat_Pack2() and PyFloat_Unpack2() to Python 3.11a7. +// bpo-11734 added _PyFloat_Pack2() and _PyFloat_Unpack2() to Python 3.6.0b1. 
+// Python 3.11a2 moved _PyFloat_Pack2() and _PyFloat_Unpack2() to the internal +// C API: Python 3.11a2-3.11a6 versions are not supported. +#if 0x030600B1 <= PY_VERSION_HEX && PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyFloat_Pack2(double x, char *p, int le) +{ return _PyFloat_Pack2(x, (unsigned char*)p, le); } + +PYCAPI_COMPAT_STATIC_INLINE(double) +PyFloat_Unpack2(const char *p, int le) +{ return _PyFloat_Unpack2((const unsigned char *)p, le); } +#endif + + +// bpo-46906 added PyFloat_Pack4(), PyFloat_Pack8(), PyFloat_Unpack4() and +// PyFloat_Unpack8() to Python 3.11a7. +// Python 3.11a2 moved _PyFloat_Pack4(), _PyFloat_Pack8(), _PyFloat_Unpack4() +// and _PyFloat_Unpack8() to the internal C API: Python 3.11a2-3.11a6 versions +// are not supported. +#if PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyFloat_Pack4(double x, char *p, int le) +{ return _PyFloat_Pack4(x, (unsigned char*)p, le); } + +PYCAPI_COMPAT_STATIC_INLINE(int) +PyFloat_Pack8(double x, char *p, int le) +{ return _PyFloat_Pack8(x, (unsigned char*)p, le); } + +PYCAPI_COMPAT_STATIC_INLINE(double) +PyFloat_Unpack4(const char *p, int le) +{ return _PyFloat_Unpack4((const unsigned char *)p, le); } + +PYCAPI_COMPAT_STATIC_INLINE(double) +PyFloat_Unpack8(const char *p, int le) +{ return _PyFloat_Unpack8((const unsigned char *)p, le); } +#endif + + +// gh-92154 added PyCode_GetCode() to Python 3.11.0b1 +#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyCode_GetCode(PyCodeObject *code) +{ + return Py_NewRef(code->co_code); +} +#endif + + +// gh-95008 added PyCode_GetVarnames() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyCode_GetVarnames(PyCodeObject *code) +{ + return Py_NewRef(code->co_varnames); +} +#endif + +// gh-95008 added PyCode_GetFreevars() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyCode_GetFreevars(PyCodeObject *code) +{ + return Py_NewRef(code->co_freevars); +} +#endif + +// gh-95008 added PyCode_GetCellvars() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyCode_GetCellvars(PyCodeObject *code) +{ + return Py_NewRef(code->co_cellvars); +} +#endif + + +// Py_UNUSED() was added to Python 3.4.0b2. 
+#if PY_VERSION_HEX < 0x030400B2 && !defined(Py_UNUSED) +# if defined(__GNUC__) || defined(__clang__) +# define Py_UNUSED(name) _unused_ ## name __attribute__((unused)) +# else +# define Py_UNUSED(name) _unused_ ## name +# endif +#endif + + +// gh-105922 added PyImport_AddModuleRef() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A0 +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyImport_AddModuleRef(const char *name) +{ + return Py_XNewRef(PyImport_AddModule(name)); +} +#endif + + +// gh-105927 added PyWeakref_GetRef() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D0000 +PYCAPI_COMPAT_STATIC_INLINE(int) +PyWeakref_GetRef(PyObject *ref, PyObject **pobj) +{ + PyObject *obj; + if (ref != NULL && !PyWeakref_Check(ref)) { + *pobj = NULL; + PyErr_SetString(PyExc_TypeError, "expected a weakref"); + return -1; + } + obj = PyWeakref_GetObject(ref); + if (obj == NULL) { + // SystemError if ref is NULL + *pobj = NULL; + return -1; + } + if (obj == Py_None) { + *pobj = NULL; + return 0; + } + *pobj = Py_NewRef(obj); + return (*pobj != NULL); +} +#endif + + +// bpo-36974 added PY_VECTORCALL_ARGUMENTS_OFFSET to Python 3.8b1 +#ifndef PY_VECTORCALL_ARGUMENTS_OFFSET +# define PY_VECTORCALL_ARGUMENTS_OFFSET (_Py_CAST(size_t, 1) << (8 * sizeof(size_t) - 1)) +#endif + +// bpo-36974 added PyVectorcall_NARGS() to Python 3.8b1 +#if PY_VERSION_HEX < 0x030800B1 +static inline Py_ssize_t +PyVectorcall_NARGS(size_t n) +{ + return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET; +} +#endif + + +// gh-105922 added PyObject_Vectorcall() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_Vectorcall(PyObject *callable, PyObject *const *args, + size_t nargsf, PyObject *kwnames) +{ +#if PY_VERSION_HEX >= 0x030800B1 && !defined(PYPY_VERSION) + // bpo-36974 added _PyObject_Vectorcall() to Python 3.8.0b1 + return _PyObject_Vectorcall(callable, args, nargsf, kwnames); +#else + PyObject *posargs = NULL, *kwargs = NULL; + PyObject *res; + Py_ssize_t nposargs, nkwargs, i; + + if (nargsf != 0 && args == NULL) { + PyErr_BadInternalCall(); + goto error; + } + if (kwnames != NULL && !PyTuple_Check(kwnames)) { + PyErr_BadInternalCall(); + goto error; + } + + nposargs = (Py_ssize_t)PyVectorcall_NARGS(nargsf); + if (kwnames) { + nkwargs = PyTuple_GET_SIZE(kwnames); + } + else { + nkwargs = 0; + } + + posargs = PyTuple_New(nposargs); + if (posargs == NULL) { + goto error; + } + if (nposargs) { + for (i=0; i < nposargs; i++) { + PyTuple_SET_ITEM(posargs, i, Py_NewRef(*args)); + args++; + } + } + + if (nkwargs) { + kwargs = PyDict_New(); + if (kwargs == NULL) { + goto error; + } + + for (i = 0; i < nkwargs; i++) { + PyObject *key = PyTuple_GET_ITEM(kwnames, i); + PyObject *value = *args; + args++; + if (PyDict_SetItem(kwargs, key, value) < 0) { + goto error; + } + } + } + else { + kwargs = NULL; + } + + res = PyObject_Call(callable, posargs, kwargs); + Py_DECREF(posargs); + Py_XDECREF(kwargs); + return res; + +error: + Py_DECREF(posargs); + Py_XDECREF(kwargs); + return NULL; +#endif +} +#endif + + +#ifdef __cplusplus +} +#endif +#endif // PYTHONCAPI_COMPAT diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/schema_info.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/schema_info.h new file mode 100644 index 0000000000000000000000000000000000000000..461f5a6f0427b8231b872c609750b512c120401a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/schema_info.h @@ -0,0 +1,117 @@ +#pragma once + +#include +#include + +namespace torch { 
+namespace utils { + +using SchemaSpecialCasePair = + std::pair>; +/** + * class SchemaInfo + * + * FunctionSchema wrapper that publicizes argument value specific operator + * behavior (mutation, aliasing, special cases, etc...) + */ + +struct TORCH_API SchemaInfo { + public: + explicit SchemaInfo(c10::FunctionSchema schema) + : schema_(std::move(schema)), + alias_maps_current_(false), + has_init_(false) {} + explicit SchemaInfo(const char* signature) + : schema_(torch::jit::parseSchema(signature)), + alias_maps_current_(false), + has_init_(false) {} + + bool is_mutable(); + + bool is_mutable(const c10::SchemaArgument& argument); + + bool is_mutable(c10::string_view name); + + bool has_argument(c10::string_view name); + + bool is_nondeterministic() const; + + // Returns whether lhs and rhs may alias directly. + // This does not account for cases where lhs or rhs are a container that + // may contain elements that alias the other argument. + // Besides the checks already included in FunctionSchema::may_alias, this + // method also accounts special aliasing cases causes by aliasing argument + // values supplied from addArgumentValue. + bool may_alias( + const c10::SchemaArgument& lhs, + const c10::SchemaArgument& rhs); + + // Returns whether lhs and rhs may alias directly or whether lhs/rhs are a + // container that may contain elements that alias the other argument. Besides + // the checks already included in FunctionSchema::may_contain_alias, this + // method also accounts for special aliasing cases causes by aliasing argument + // values supplied from addArgumentValue. bidirectional = false only returns + // whether lhs may contain an alias of rhs while bidirectional = true returns + // both directions. + bool may_contain_alias( + const c10::SchemaArgument& lhs, + const c10::SchemaArgument& rhs, + bool bidirectional = true); + + void addArgumentValue(const std::string& name, const at::IValue& value); + + void addArgumentValues( + const std::vector>& value_list); + + void addArgumentValues( + const std::unordered_map& values); + + bool hasInputArgumentNamed(const std::string& name) const; + + private: + // This function enforces more conservative results when the TORCH_WARN is + // triggered from above due to duplicates in an argument list + void ensureConservativity( + const std::unordered_set& duplicates, + const std::vector& arguments_list, + c10::SchemaArgType type); + + void initSchemaInfo(); + + void generateAliasMaps(); + + bool mayContainAliasImpl( + const c10::SchemaArgument& lhs, + const c10::SchemaArgument& rhs); + + static std::vector getNonDeterministicOps(); + + static std::vector getTrainingOps(); + + const std::unordered_set& wildcardSet(); + + const std::unordered_set& containerSet(); + + // Set of all wildcard arguments + std::unordered_set wildcard_set_; + + // Set of all container arguments + std::unordered_set container_set_; + + // Map of argument IValues + std::unordered_map value_map_; + + // Alias map of inputs with each other + std::vector> input_alias_map_; + + // Alias map of outputs to inputs + std::vector> output_alias_map_; + + const c10::FunctionSchema schema_; + + bool alias_maps_current_; + + bool has_init_; +}; +} // namespace utils +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_types.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_types.h new file mode 100644 index 0000000000000000000000000000000000000000..601cc920a2e7d6916d9cc1f22dc9fa2097bc8351 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_types.h @@ -0,0 +1,22 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace utils { + +std::string options_to_string(const at::TensorOptions& options); +std::string type_to_string(const at::DeprecatedTypeProperties& type); +at::TensorOptions options_from_string(const std::string& str); + +// return a vector of all "declared" types, even those that weren't compiled +std::vector> all_declared_types(); + +// return python module name of backend, like torch.cuda, torch.foo +const char* backend_to_string(const at::Backend& backend); + +} // namespace utils +} // namespace torch
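
Usage note (not part of the patch above): the last header in this diff, torch/csrc/utils/tensor_types.h, only declares its helpers, so a minimal calling sketch may help readers. The sketch below is illustrative, not upstream code; it assumes a translation unit built against libtorch with the torch include directories on the compiler path, and the printed strings are examples only (exact output depends on the build). The function signatures are taken directly from the header shown above; everything else (the main() scaffolding, the chosen TensorOptions) is hypothetical.

// sketch.cpp -- hypothetical caller of the tensor_types.h helpers
#include <iostream>
#include <ATen/ATen.h>
#include <torch/csrc/utils/tensor_types.h>

int main() {
  // Build a simple CPU float TensorOptions to feed the formatter.
  at::TensorOptions opts = at::TensorOptions().dtype(at::kFloat).device(at::kCPU);

  // options_to_string() renders TensorOptions as a legacy type string
  // (something along the lines of "torch.FloatTensor").
  std::cout << torch::utils::options_to_string(opts) << "\n";

  // backend_to_string() maps a backend enum to its python module name,
  // e.g. "torch.cuda" for the CUDA backend, per the comment in the header.
  std::cout << torch::utils::backend_to_string(at::Backend::CPU) << "\n";
  return 0;
}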