diff --git a/ckpts/universal/global_step20/zero/13.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/13.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5d1161ffab0cd7f0f15374440147b24de8d534d0
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/13.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c26f1cbb528d2a6be03a4c5e5356e44e8fdf302a58e66de6bb279dd235363d8a
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/4.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/4.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f31fdf24609c39669ac2c17dccc4ae101474b35d
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/4.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07cc391ffe5d4b2c2e700b3fee684fe64274fcce6168d6de4e78cb6ae7a9418a
+size 9372
diff --git a/ckpts/universal/global_step20/zero/4.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/4.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1915682dc0cbc8ef3d69c319a2dd1b69703cf6ce
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/4.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92173878e080b16d911d077e8609b76fa08192c6a7a21147d244111cbee3f5fd
+size 9387
diff --git a/ckpts/universal/global_step20/zero/5.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/5.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..139130f1b340f84ad3e8dc3dcba3418f96081302
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/5.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de19ed1edc2ad066cf58370e3f0fa0f6ad33fb72b988a05d497cc5d178ded618
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/5.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/5.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..019aedbe547fd503caf5b211d0019d8b5e2609f0
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/5.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da272b30cd8666d5bb9bf92a0a7607816129910f003c72f664dc89d75edf03c6
+size 16778317
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d1161ffab0cd7f0f15374440147b24de8d534d0..5499f0cd27c43e4def80f694e7bb06067edaf1f3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_coalesced_ops.h>
+
+namespace at {
+
+
+// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _coalesced_out(at::Tensor & out, const at::Tensor & self, bool coalesced) {
+    return at::_ops::_coalesced_out::call(self, coalesced, out);
+}
+// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
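+// The generated `*_out` / `*_outf` pair below differ only in argument order:
+// `_out` takes the output tensor first, `_outf` takes it last, and both
+// forward to the same at::_ops::_coalesced_out::call. A minimal sketch of the
+// calling convention (tensor names are illustrative, not part of this diff;
+// _coalesced is an internal op that only makes sense on sparse tensors):
+//
+//   at::Tensor dst = at::empty_like(src);
+//   at::_coalesced_out(dst, src, /*coalesced=*/true);   // out-parameter first
+//   at::_coalesced_outf(src, /*coalesced=*/true, dst);  // out-parameter last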
+inline at::Tensor & _coalesced_outf(const at::Tensor & self, bool coalesced, at::Tensor & out) {
+    return at::_ops::_coalesced_out::call(self, coalesced, out);
+}
+
+// aten::_coalesced(Tensor self, bool coalesced) -> Tensor
+inline at::Tensor _coalesced(const at::Tensor & self, bool coalesced) {
+    return at::_ops::_coalesced::call(self, coalesced);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from_and_resize_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from_and_resize_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..173a0558a04211badc4213b7f1cce5d96cb5d48c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from_and_resize_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _copy_from_and_resize {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_copy_from_and_resize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & dst);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst);
+};
+
+struct TORCH_API _copy_from_and_resize_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_copy_from_and_resize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc90a22df20302e07b540d5a0561b4a5aa3c8c6a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _foobar(const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..02e3e40f993af792de8684d256f391160ec134f4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_clamp_min_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_slow(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void foreach_tensor_clamp_min_scalar_kernel_slow_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_cuda(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void foreach_tensor_clamp_min_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_clamp_min_List_out(at::TensorList self, at::TensorList other, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_slow(at::TensorList self, at::TensorList other);
+TORCH_API void foreach_tensor_clamp_min_list_kernel_slow_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_cuda(at::TensorList self, at::TensorList other);
+TORCH_API void foreach_tensor_clamp_min_list_kernel_cuda_(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_clamp_min_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..384dae22eb53078ed373c233ab43020bbe0c45ba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
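+// The _foreach_* ops declared below apply the base op across a whole list of
+// tensors, typically as one fused kernel on CUDA; the trailing-underscore
+// variant mutates its inputs in place. A minimal usage sketch (tensor values
+// are illustrative, not part of this diff):
+//
+//   std::vector<at::Tensor> ts = {at::rand({2, 2}), at::rand({3})};
+//   std::vector<at::Tensor> ys = at::_foreach_floor(ts);  // returns new tensors
+//   at::_foreach_floor_(ts);                              // floors each in place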
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_floor(at::TensorList self);
+TORCH_API void _foreach_floor_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e27be01a482bd58a3613e075d20526380df8a175
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_maximum_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_maximum_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+TORCH_API void _foreach_maximum_out(at::TensorList out, at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_maximum_outf(at::TensorList self, at::TensorList other, at::TensorList out);
+TORCH_API void _foreach_maximum_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_maximum_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..07ae157c5eb4811c046f59c4534cdd7abc9ad992
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
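+// _foreach_minimum, declared below, has three overloads: clamp every tensor
+// against a single Scalar, against a matching TensorList, or against one
+// Scalar per tensor. A sketch of the three call shapes (values illustrative):
+//
+//   std::vector<at::Tensor> ts = {at::rand({2}), at::rand({2})};
+//   auto a = at::_foreach_minimum(ts, at::Scalar(0.5));                    // Scalar
+//   auto b = at::_foreach_minimum(ts, ts);                                 // TensorList
+//   auto c = at::_foreach_minimum(ts, {at::Scalar(0.1), at::Scalar(0.9)}); // ScalarList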
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_minimum_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_minimum_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_minimum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..93c97c452959bfcf3cb010ed663b7038dc3a5266
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_neg_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_neg_slow(at::TensorList self);
+TORCH_API void foreach_tensor_neg_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_neg_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_neg_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..946bec271173fa5f4d2f0150300ac2ea24823d90
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
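+// In the schema strings below, "Tensor(a!)" marks an argument the op writes
+// through ("(a)" names the alias set, "!" marks the write), and
+// "Tensor(a!)[]" marks a list of mutated tensors. So the schema
+//   _foreach_reciprocal_(Tensor(a!)[] self) -> ()
+// declares the in-place variant: it returns nothing and overwrites every
+// tensor in `self`.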
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_reciprocal {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_reciprocal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_reciprocal(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_reciprocal_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_reciprocal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_reciprocal_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_reciprocal_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_reciprocal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt.h
new file mode 100644
index 0000000000000000000000000000000000000000..84400e1dd4cb00304502d931c31f67358ee2e0aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_sqrt_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_sqrt(at::TensorList self) {
+    return at::_ops::_foreach_sqrt::call(self);
+}
+
+// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
+inline void _foreach_sqrt_(at::TensorList self) {
+    return at::_ops::_foreach_sqrt_::call(self);
+}
+
+// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sqrt_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_sqrt_out::call(self, out);
+}
+// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sqrt_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_sqrt_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f16f621b345113a4e265abcd5de19e8095bc5e8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd.h
@@ -0,0 +1,63 @@
+#pragma once
+
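+// _fused_sgd_, declared below, runs an entire SGD step (weight decay,
+// momentum, dampening, optional Nesterov) over lists of parameters, grads,
+// and momentum buffers in one fused call; grad_scale/found_inf hook into AMP
+// gradient scaling. A hedged sketch of the in-place overload (all
+// hyperparameter values are illustrative):
+//
+//   at::_fused_sgd_(params, grads, momentum_buffers,
+//                   /*weight_decay=*/0.0, /*momentum=*/0.9, /*lr=*/0.01,
+//                   /*dampening=*/0.0, /*nesterov=*/false,
+//                   /*maximize=*/false, /*is_first_step=*/true);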
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fused_sgd_ops.h>
+
+namespace at {
+
+
+// aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_sgd_(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_sgd_::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
+}
+
+// aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_sgd_(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_sgd__tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
+}
+
+// aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_sgd_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_sgd_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
+}
+// aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_sgd_outf(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_sgd_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
+}
+
+// aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_sgd::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
+}
+
+// aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_sgd_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_sgd_tensor_lr_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
+}
+// aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_sgd_outf(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_sgd_tensor_lr_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
+}
+
+// aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_sgd_tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a94129482291c01fb037fb03a93fabc262b83d9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _index_put_impl_ {
+  using schema = at::Tensor & (at::Tensor &, const c10::List<c10::optional<at::Tensor>> &, const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_index_put_impl_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe);
+};
+
+struct TORCH_API _index_put_impl_out {
+  using schema = at::Tensor & (const at::Tensor &, const c10::List<c10::optional<at::Tensor>> &, const at::Tensor &, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_index_put_impl")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out); +}; + +struct TORCH_API _index_put_impl { + using schema = at::Tensor (const at::Tensor &, const c10::List> &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_index_put_impl") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate, bool unsafe); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate, bool unsafe); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_storage_offsets.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_storage_offsets.h new file mode 100644 index 0000000000000000000000000000000000000000..db673002a346572016f08a6fb29cc4b0073ca029 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_storage_offsets.h @@ -0,0 +1,34 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _nested_tensor_storage_offsets_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::_nested_tensor_storage_offsets_out::call(self, out); +} +// aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _nested_tensor_storage_offsets_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_nested_tensor_storage_offsets_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_circular.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_circular.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6742ad683ff08b4c0def4ca327c4f3af33405c7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_circular.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_pad_circular_ops.h>
+
+namespace at {
+
+
+// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
+inline at::Tensor _pad_circular(const at::Tensor & self, at::IntArrayRef pad) {
+    return at::_ops::_pad_circular::call(self, c10::fromIntArrayRefSlow(pad));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _pad_circular(const at::Tensor & self, at::IntArrayRef pad) {
+    return at::_ops::_pad_circular::call(self, c10::fromIntArrayRefSlow(pad));
+  }
+}
+
+// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
+inline at::Tensor _pad_circular_symint(const at::Tensor & self, c10::SymIntArrayRef pad) {
+    return at::_ops::_pad_circular::call(self, pad);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _pad_circular(const at::Tensor & self, c10::SymIntArrayRef pad) {
+    return at::_ops::_pad_circular::call(self, pad);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..41f2cad2c4d31a7caac94c7ebf4bd4d4393a2234
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
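+// The _pad_circular header above shows torchgen's SymInt pattern: the plain
+// at::IntArrayRef overload converts through c10::fromIntArrayRefSlow, while
+// the *_symint variant takes c10::SymIntArrayRef directly so symbolic shapes
+// survive tracing. A sketch of both call styles (pad widths illustrative):
+//
+//   at::Tensor x = at::rand({1, 2, 4});
+//   at::Tensor y = at::_pad_circular(x, {1, 1});        // concrete ints
+//   std::vector<c10::SymInt> pad = {c10::SymInt(1), c10::SymInt(1)};
+//   at::Tensor z = at::_pad_circular_symint(x, pad);    // symbolic ints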
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor _sparse_broadcast_to_copy(const at::Tensor & self, at::IntArrayRef size);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4b701e23964325f0c08b875abf0b9cbd2750011
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _standard_gamma(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d29eaa40ecbaa4f85f507ca0734918a3f5917f09
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _test_functorch_fallback(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7896e26f7f8ea5190502f37d493f61564fad3c76
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
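+// Each *_dispatch.h header pins ops to one dispatch key via its namespace:
+// at::cpu:: and at::cuda:: call the backend kernel directly, skipping the
+// dispatcher, while at::compositeexplicitautograd:: (below) holds ops with a
+// single backend-agnostic kernel registered under CompositeExplicitAutograd.
+// A sketch for a CPU-backed op such as asin (tensor value illustrative):
+//
+//   at::Tensor t = at::rand({3});
+//   at::Tensor s = at::cpu::asin(t);   // CPU kernel, no dispatcher round trip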
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _to_dense_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt);
+TORCH_API at::Tensor & _to_dense_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3bea77708d5033be909143cada38894441e83cad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/_upsample_bicubic2d_aa_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured__upsample_bicubic2d_aa_backward_out_cpu : public at::meta::structured__upsample_bicubic2d_aa_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & grad_input);
+};
+struct TORCH_API structured__upsample_bicubic2d_aa_backward_out_cuda : public at::meta::structured__upsample_bicubic2d_aa_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7b0310b745941fa1a5c73c8ea1d2c6a839036ff
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..af1a897eef6e2f181282c8d292b9c352f5098ffd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__upsample_bilinear2d_aa_backward : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a5cc35b4a7e0e919d0a42fba127aab216f37b3d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool _use_cudnn_rnn_flatten_weight();
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..525396d407c731a5137f91f307b58e6d620e097f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor abs(const at::Tensor & self);
+TORCH_API at::Tensor & abs_(at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/acosh_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/acosh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..574217ea22005352e027d94a892dd6cf9eab649a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/acosh_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API acosh {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acosh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acosh(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API acosh_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acosh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acosh_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API acosh_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acosh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e19bc4649a705a29bd9aed8c728a384221b05735 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API add_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::add") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API add__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::add_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API add_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::add") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API add_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::add") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +struct TORCH_API add__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::add_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +struct TORCH_API add_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::add") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/alias_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/alias_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..303fc978f22fe6f3fc9a2928f12ff8d7c7b27bc5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/alias_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::alias_copy(Tensor self) -> Tensor +inline at::Tensor alias_copy(const at::Tensor & self) { + return at::_ops::alias_copy::call(self); +} + +// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & alias_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::alias_copy_out::call(self, out);
+}
+// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & alias_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::alias_copy_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6fb96ef8c2dea0482e71831a7fbd14be60c9d15
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/allclose_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API allclose {
+  using schema = bool (const at::Tensor &, const at::Tensor &, double, double, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::allclose")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool")
+  static bool call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h
new file mode 100644
index 0000000000000000000000000000000000000000..4669f4d1ee112147dd9660833422906aabf197ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/arccos_ops.h>
+
+namespace at {
+
+
+// aten::arccos(Tensor self) -> Tensor
+inline at::Tensor arccos(const at::Tensor & self) {
+    return at::_ops::arccos::call(self);
+}
+
+// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & arccos_(at::Tensor & self) {
+    return at::_ops::arccos_::call(self);
+}
+
+// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arccos_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::arccos_out::call(self, out);
+}
+// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
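+// arccos is the NumPy-style alias of at::acos; both names resolve to the
+// same computation, so the two calls below agree (values illustrative):
+//
+//   at::Tensor t = at::rand({4});
+//   bool same = at::allclose(at::arccos(t), at::acos(t));  // true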
+inline at::Tensor & arccos_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::arccos_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asin_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asin_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..70012b262494c2d6edf2f941592a90a5aa572105
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asin_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor asin(const at::Tensor & self);
+TORCH_API at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & asin_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..15780c45c6c484e6942271bf3b48e8e8ce48e5cb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API baddbmm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::baddbmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha);
+};
+
+struct TORCH_API baddbmm_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::baddbmm_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); +}; + +struct TORCH_API baddbmm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::baddbmm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e0c135d6e76daba4d60b81c3afb6061c4e0e192 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor binomial(const at::Tensor & count, const at::Tensor & prob, c10::optional generator=c10::nullopt); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf94c9e57d6d19a7bea8a2006c2ef250c837e765 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
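+// ----------------------------------------------------------------------
+// Semantics sketch (illustrative; assumes #include <ATen/ATen.h>).
+// baddbmm computes out = beta * self + alpha * (batch1 @ batch2) over a
+// batch of matrices:
+//
+//   at::Tensor self = at::zeros({10, 3, 5});
+//   at::Tensor b1   = at::randn({10, 3, 4});
+//   at::Tensor b2   = at::randn({10, 4, 5});
+//   at::Tensor r    = at::baddbmm(self, b1, b2, /*beta=*/1, /*alpha=*/1);
+//
+// binomial above is declared only for the CUDA dispatch key (namespace
+// at::cuda); it draws binomial samples from per-element counts and
+// probabilities, with an optional generator for reproducibility.
+// ----------------------------------------------------------------------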
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor bitwise_not(const at::Tensor & self); +TORCH_API at::Tensor & bitwise_not_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & bitwise_not_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & bitwise_not_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a3b2cd3cae43c69fcc3d4abe8d2f92d09c20d9d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & bitwise_xor_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0bc63c65d84860e1863e82c2c524c2188326e366 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API block_diag { + using schema = at::Tensor (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::block_diag") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "block_diag(Tensor[] tensors) -> Tensor") + static at::Tensor call(at::TensorList tensors); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API block_diag_out { + using schema = at::Tensor & (at::TensorList, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::block_diag") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "block_diag.out(Tensor[] tensors, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(at::TensorList tensors, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_native.h new file mode 100644 index 0000000000000000000000000000000000000000..360e68aa3056d2dfab5daaf8256324c20eb7208c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor chain_matmul(at::TensorList matrices); +TORCH_API at::Tensor & chain_matmul_out(at::TensorList matrices, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e7eef9e550737d2ec979f2ab0c661187cc47e94d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
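+// ----------------------------------------------------------------------
+// Usage sketch (illustrative; assumes #include <ATen/ATen.h>).
+// block_diag assembles a block-diagonal matrix from a TensorList, and
+// chain_matmul multiplies a sequence of 2-D tensors in an association
+// order chosen to minimise multiplication cost:
+//
+//   at::Tensor a = at::ones({2, 2});
+//   at::Tensor b = at::ones({3, 3});
+//   at::Tensor d = at::block_diag({a, b});        // 5x5 result
+//   at::Tensor p = at::chain_matmul({a, a, a});   // a @ a @ a
+// ----------------------------------------------------------------------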
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor crow_indices_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b30bbd728621cf7c5247294c0b368f9fed50802b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & cudnn_affine_grid_generator_out(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out); +TORCH_API at::Tensor cudnn_affine_grid_generator_forward(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8dee6aef370181014cf43e5e303114cece2930d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
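+// ----------------------------------------------------------------------
+// Note on the *_dispatch.h headers in this diff: each pins an operator
+// to a single dispatch key. at::cpu::asin or at::cuda::bitwise_not call
+// the kernel registered for that backend directly, bypassing the
+// dispatcher (and therefore autograd), while the
+// compositeimplicitautograd / compositeexplicitautogradnonfunctional
+// namespaces hold kernels shared across backends. A sketch, only valid
+// when the tensor already lives on the matching device:
+//
+//   at::Tensor t = at::rand({3});        // CPU tensor
+//   at::Tensor r = at::cpu::asin(t);     // direct CPU kernel call
+// ----------------------------------------------------------------------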
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor data(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine.h new file mode 100644 index 0000000000000000000000000000000000000000..cf1dd5b1b3c6b229c5d1fa37b42c210cbe69d204 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor +inline at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max); +} + +// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor +inline at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_native.h new file mode 100644 index 0000000000000000000000000000000000000000..af648fcf4982fb2a6d5eba9ac93d85fb8a15b220 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train); +TORCH_API at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft.h new file mode 100644 index 0000000000000000000000000000000000000000..6469d6c39681b72e484a34d3e0a2211d9d6f7783 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +inline at::Tensor fft_hfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft::call(self, n.has_value() ? 
c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_hfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); + } +} + +// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +inline at::Tensor fft_hfft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft::call(self, n, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_hfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft::call(self, n, dim, norm); + } +} + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_hfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_hfft_out::call(self, n, dim, norm, out); + } +} + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
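+// ----------------------------------------------------------------------
+// Overload-resolution sketch (illustrative; assumes
+// #include <ATen/ATen.h>). The at::symint wrappers above are gated by
+// enable_if on the template parameter, so callers pick the plain-int or
+// SymInt form explicitly:
+//
+//   at::Tensor x  = at::randn({8});
+//   at::Tensor h1 = at::fft_hfft(x);                 // n=None, dim=-1
+//   at::Tensor h2 = at::fft_hfft(x, /*n=*/16);       // zero-padded
+//   at::Tensor h3 = at::fft_hfft_symint(x, c10::SymInt(16));
+//
+// A plain int64_t n is wrapped into c10::SymInt before the dispatcher
+// call, as the call sites above show.
+// ----------------------------------------------------------------------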
+inline at::Tensor & fft_hfft_symint_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::call(self, n, dim, norm, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft2_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft2_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c0f2230846366811bf04c77417153cf2938929a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft2_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_hfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt); +TORCH_API const at::Tensor & fft_hfft2_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft2_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5b1d4206b2a7cc1bf849c40a884c3eff5042a3c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft2_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fft_ihfft2 { + using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ihfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm); +}; + +struct TORCH_API fft_ihfft2_out { + using schema = const at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ihfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None, *, Tensor(a!) out) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bb99b3f3e9a590d5130d2c0de929ae25baed0bf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hstack(at::TensorList tensors); +TORCH_API at::Tensor & hstack_out(at::TensorList tensors, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..8373799b35a254400d5815eaa852944f9fc0b343 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor +inline at::Tensor instance_norm(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { + return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..66dee7e90ee1c980cbac2acb5f90325462a797cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
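+// ----------------------------------------------------------------------
+// Usage sketch (illustrative; assumes #include <ATen/ATen.h>).
+// instance_norm normalises each (N, C) slice over its spatial dims; the
+// optional tensors are passed as c10::optional<at::Tensor>, with
+// c10::nullopt when absent:
+//
+//   at::Tensor in = at::randn({2, 3, 8, 8});
+//   at::Tensor r  = at::instance_norm(in,
+//       /*weight=*/c10::nullopt, /*bias=*/c10::nullopt,
+//       /*running_mean=*/c10::nullopt, /*running_var=*/c10::nullopt,
+//       /*use_input_stats=*/true, /*momentum=*/0.1, /*eps=*/1e-5,
+//       /*cudnn_enabled=*/false);
+//
+// is_pinned, declared next, reports whether a tensor's storage sits in
+// page-locked host memory for the given (optional) device.
+// ----------------------------------------------------------------------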
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_pinned { + using schema = bool (const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_pinned") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_pinned(Tensor self, Device? device=None) -> bool") + static bool call(const at::Tensor & self, c10::optional device); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional device); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b127f01036d92fc97c5ebb6ea56d3952c237d3ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lerp__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +}; + +struct TORCH_API lerp__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +}; + +struct TORCH_API lerp_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); +}; + +struct TORCH_API lerp_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); +}; + +struct TORCH_API lerp_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +}; + +struct TORCH_API lerp_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a0f37286cf8a364a7ce77c54dfd2e9dc4352b8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
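+// ----------------------------------------------------------------------
+// Semantics sketch (illustrative; assumes #include <ATen/ATen.h>).
+// lerp computes self + weight * (end - self); the Scalar and Tensor
+// overloads above differ only in the weight type, and the trailing-
+// underscore forms mutate self in place:
+//
+//   at::Tensor a = at::zeros({3});
+//   at::Tensor b = at::ones({3});
+//   at::Tensor mid = at::lerp(a, b, 0.5);      // [0.5, 0.5, 0.5]
+//   a.lerp_(b, at::full({3}, 0.25));           // Tensor weight, in place
+// ----------------------------------------------------------------------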
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ea61a4b6f4a75676c002be8118e439e3fa0a1325 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_lu_factor_ex_out : public at::meta::structured_linalg_lu_factor_ex { +void impl(const at::Tensor & A, bool pivot, bool check_errors, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b2f492701182549dba0a5c3f11947caef69dea4b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, const c10::optional & atol={}, const c10::optional & rtol={}, bool hermitian=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_slogdet_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_slogdet_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a726affcd3d64e04e5d73f5ac719f32a92e60966 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_slogdet_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_slogdet { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_slogdet") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)") + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & A); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A); +}; + +struct TORCH_API linalg_slogdet_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_slogdet") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)") + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ae380cc18fdaa690dc9b5a0d8cca27e64e8764a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
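+// ----------------------------------------------------------------------
+// Usage sketch (illustrative; assumes #include <ATen/ATen.h>).
+// linalg_slogdet factors det(A) as sign * exp(logabsdet), which stays
+// finite where det itself would over- or underflow:
+//
+//   at::Tensor A = at::randn({4, 4});
+//   auto [sign, logabsdet] = at::linalg_slogdet(A);
+//   at::Tensor det = sign * at::exp(logabsdet);
+//
+// The .out overload writes into caller-provided sign/logabsdet tensors
+// instead of allocating new ones.
+// ----------------------------------------------------------------------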
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2); +TORCH_API at::Tensor & linalg_tensorinv_out(at::Tensor & out, const at::Tensor & self, int64_t ind=2); +TORCH_API at::Tensor & linalg_tensorinv_outf(const at::Tensor & self, int64_t ind, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c9ff2c639b28b508fb6e75d5a93e6b47ddd8c477 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select.h new file mode 100644 index 0000000000000000000000000000000000000000..707b714f4d94354d314b01a3bf3b4f7f0992ae1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & masked_select_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::masked_select_out::call(self, mask, out); +} +// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & masked_select_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { + return at::_ops::masked_select_out::call(self, mask, out); +} + +// aten::masked_select(Tensor self, Tensor mask) -> Tensor +inline at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::masked_select::call(self, mask); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3279c68175282487af83f8e573a9e680f56267b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & median_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor median_cpu(const at::Tensor & self); +TORCH_API at::Tensor median_cuda(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out_cpu(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out_cuda(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..13032f04eaee9a0c35286f9e192437ebea2c9cbc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
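+// ----------------------------------------------------------------------
+// Usage sketch (illustrative; assumes #include <ATen/ATen.h>).
+// masked_select flattens the elements where mask is true into a new 1-D
+// tensor; median with a dim argument returns (values, indices):
+//
+//   at::Tensor x = at::randn({2, 3});
+//   at::Tensor picked = at::masked_select(x, x > 0);
+//   auto [values, indices] = at::median(x, /*dim=*/1, /*keepdim=*/false);
+//
+// median_cpu / median_cuda above are the per-backend kernels behind the
+// single at::median entry point.
+// ----------------------------------------------------------------------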
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple min(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices); +TORCH_API at::Tensor min(const at::Tensor & self); +TORCH_API at::Tensor & min_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & min_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution.h new file mode 100644 index 0000000000000000000000000000000000000000..637caa577341f3ebcae7ab61c8d304e0a7b0c8f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor +inline at::Tensor miopen_depthwise_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic); +} +namespace symint { + template ::value>> + at::Tensor miopen_depthwise_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic); + } +} + +// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor +inline at::Tensor miopen_depthwise_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); +} +namespace symint { + template ::value>> + at::Tensor miopen_depthwise_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } +} + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); +} +namespace symint { + template ::value>> + at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } +} + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); +} +namespace symint { + template ::value>> + at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } +} + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_depthwise_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); +} +namespace symint { + template ::value>> + at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } +} + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
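+// ----------------------------------------------------------------------
+// Note (illustrative): every SymInt-taking operator in this diff follows
+// the same four-way pattern visible above: {plain, symint} x {out, outf}.
+// The plain IntArrayRef entry points convert eagerly through
+// c10::fromIntArrayRefSlow before reaching the dispatcher, so code that
+// traces with symbolic shapes would prefer the *_symint forms, e.g.
+// (given SymInt shape arguments pad/str/dil already in hand):
+//
+//   // at::miopen_depthwise_convolution_symint(self, weight, bias,
+//   //     pad, str, dil, groups, benchmark, deterministic);
+// ----------------------------------------------------------------------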
+inline at::Tensor & miopen_depthwise_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); +} +namespace symint { + template ::value>> + at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..37cebd31c379399936156bcc0a4db2f850b8cb9e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API narrow { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::SymInt, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::narrow") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); +}; + +struct TORCH_API narrow_Tensor { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::narrow") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fd828c867c69a7cdc166fd99a45b085e5b742f29 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_nll_loss_forward_out_cpu : public at::meta::structured_nll_loss_forward { +void impl(const at::Tensor & self, const at::Tensor & target, at::OptionalTensorRef weight, int64_t reduction, int64_t ignore_index, const at::Tensor & output, const at::Tensor & total_weight); +}; +struct TORCH_API structured_nll_loss_forward_out_cuda : public at::meta::structured_nll_loss_forward { +void impl(const at::Tensor & self, const at::Tensor & target, at::OptionalTensorRef weight, int64_t reduction, int64_t ignore_index, const at::Tensor & output, const at::Tensor & total_weight); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e7370f0250146d02782d73cdf3300c9293184c8a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantile.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantile.h new file mode 100644 index 0000000000000000000000000000000000000000..b8e1002fcacd38d90d8d272b0948f9f9d6b39a9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantile.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor quantile(const at::Tensor & self, const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile::call(self, q, dim, keepdim, interpolation); +} + +// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) 
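+// ----------------------------------------------------------------------
+// Usage sketch (illustrative; assumes #include <ATen/ATen.h>).
+// narrow (declared in narrow_ops.h above) returns a view of `length`
+// elements starting at `start` along `dim`; prod reduces one dimension:
+//
+//   at::Tensor x = at::arange(12).reshape({3, 4});
+//   at::Tensor v = at::narrow(x, /*dim=*/1, /*start=*/1, /*length=*/2);
+//   at::Tensor p = at::prod(x, /*dim=*/0, /*keepdim=*/false);
+// ----------------------------------------------------------------------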
+inline at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_out::call(self, q, dim, keepdim, interpolation, out); +} +// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::quantile_out::call(self, q, dim, keepdim, interpolation, out); +} + +// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor quantile(const at::Tensor & self, double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation); +} + +// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_scalar_out::call(self, q, dim, keepdim, interpolation, out); +} +// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantile_outf(const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::quantile_scalar_out::call(self, q, dim, keepdim, interpolation, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca843b40a3a2e1f879cd5d3eb2a5d48632faed7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeexplicitautograd_dispatch.h @@ -0,0 +1,46 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
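+// ----------------------------------------------------------------------
+// Usage sketch (illustrative; assumes #include <ATen/ATen.h>). The two
+// quantile overloads above take q either as a tensor of probabilities or
+// as a single double; interpolation is one of 'linear', 'lower',
+// 'higher', 'nearest', 'midpoint':
+//
+//   at::Tensor x   = at::randn({100});
+//   at::Tensor med = at::quantile(x, 0.5);                  // scalar q
+//   at::Tensor qs  = at::quantile(x, at::tensor({0.25f, 0.75f}));
+// ----------------------------------------------------------------------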
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resize_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resize_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8fa7d4ff0c973b2df21f6e2da3ae7eb306573bca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resize_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj.h
new file mode 100644
index 0000000000000000000000000000000000000000..16baec799a0056dcdf077dec9de5e55ee65c915b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/resolve_conj_ops.h>
+
+namespace at {
+
+
+// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
+inline at::Tensor resolve_conj(const at::Tensor & self) {
+    return at::_ops::resolve_conj::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_stack_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_stack_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c3aeeed16af08b94d2adba2be62cc7dc371dfaf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_stack_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor row_stack(at::TensorList tensors);
+TORCH_API at::Tensor & row_stack_out(at::Tensor & out, at::TensorList tensors);
+TORCH_API at::Tensor & row_stack_outf(at::TensorList tensors, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
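+// [Editor's note] row_stack is the row-wise stacking alias of vstack; a
+// minimal illustrative call (a sketch, not generated code):
+//
+//   at::Tensor r0 = at::randn({3});
+//   at::Tensor r1 = at::randn({3});
+//   at::Tensor m  = at::row_stack({r0, r1});       // shape [2, 3]
+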
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8e976d80c3f737a868d9c4aeffbb281e4444fd7e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_chebyshev_polynomial_v {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_v")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_chebyshev_polynomial_v_x_scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_v")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Scalar & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_chebyshev_polynomial_v_n_scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_v")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Scalar & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n);
+};
+
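+// [Editor's note] These schema structs are dispatcher-level entry points;
+// user code normally reaches them through the generated at:: wrapper, e.g.
+// (an illustrative sketch, not generated code):
+//
+//   at::Tensor x = at::rand({4});               // inputs in [0, 1]
+//   at::Tensor n = at::full({4}, 3.0);          // degrees, as a tensor
+//   at::Tensor v = at::special_chebyshev_polynomial_v(x, n);
+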
"aten::special_chebyshev_polynomial_v") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out); +}; + +struct TORCH_API special_chebyshev_polynomial_v_x_scalar_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_v") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +}; + +struct TORCH_API special_chebyshev_polynomial_v_n_scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_v") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eed4e1cff11d87ac33d0b6eb36bddfa644c45f31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a30dda6cc94c7e9aa95d7d83934cf4fb327d8a5f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz.h
new file mode 100644
index 0000000000000000000000000000000000000000..30ccda2d946eb64685e30f30ef95023aaf21d293
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/trapz_ops.h>
+
+namespace at {
+
+
+// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
+inline at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
+    return at::_ops::trapz_x::call(y, x, dim);
+}
+
+// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
+inline at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1) {
+    return at::_ops::trapz_dx::call(y, dx, dim);
+}
+
+}
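+// [Editor's note] Illustrative trapezoidal integration via the overloads
+// above (a sketch, not generated code; sample values are hypothetical):
+//
+//   at::Tensor y    = at::randn({100});
+//   at::Tensor area = at::trapz(y, /*dx=*/0.1);  // uniform-spacing overload
+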
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..615866269f1ff66aae727d7e4c392bf1d2c18a05
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triangular_solve_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> triangular_solve(const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..40a0406fb2e6894b4c7a2d25d34da8c31b80f006
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong);
+TORCH_API at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace cpu
+} // namespace at
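+// [Editor's note] Illustrative call of the public wrapper (a sketch, not
+// generated code): indices of the lower triangle of a 4x4 matrix, diagonal
+// included.
+//
+//   at::Tensor idx = at::tril_indices(4, 4);     // shape [2, 10], int64
+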
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/type_as_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/type_as_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c3f58b82ca0ad2b82bb4589277ab1d4711729c7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/type_as_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor type_as(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..22d7b1871986e8d1394dc190a57c5440b4105973
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API unsafe_split_with_sizes {
+  using schema = ::std::vector<at::Tensor> (const at::Tensor &, c10::SymIntArrayRef, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unsafe_split_with_sizes")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim);
+};
+
+struct TORCH_API unsafe_split_with_sizes_out {
+  using schema = void (const at::Tensor &, c10::SymIntArrayRef, int64_t, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unsafe_split_with_sizes")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()")
+  static void call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out);
+};
+
+}} // namespace at::_ops
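+// [Editor's note] Illustrative use of the public wrapper backed by these
+// schemas (a sketch, not generated code; "unsafe" because the outputs can
+// share storage with the input without the usual view bookkeeping):
+//
+//   at::Tensor t = at::arange(10);
+//   std::vector<at::Tensor> parts = at::unsafe_split_with_sizes(t, {3, 3, 4}, /*dim=*/0);
+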
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f5e8085958f00e1bfc7d6c908b2e9bfc279d4e5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_nearest3d_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_upsample_nearest3d_backward_out_cpu : public at::meta::structured_upsample_nearest3d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_upsample_nearest3d_backward_out_cuda : public at::meta::structured_upsample_nearest3d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ab0d8d33d62ae45b68cd680b5b7309d9638b133
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
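+// [Editor's note] The backward declared above is normally reached through
+// autograd rather than called directly; an illustrative forward call that
+// would exercise it (a sketch, not generated code):
+//
+//   at::Tensor x = at::randn({1, 1, 4, 4, 4});
+//   at::Tensor y = at::upsample_trilinear3d(x, /*output_size=*/{8, 8, 8}, /*align_corners=*/false);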