diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e15c2c744f1480f999ec4a231da2d25354dffbf2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6f1588b08b51eff84953b137aa4914d36e294b14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
+TORCH_API at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
+TORCH_API at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision.h
new file mode 100644
index 0000000000000000000000000000000000000000..5100a9527dd43f1d7d9728118fb035b37efe28a2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_autocast_to_reduced_precision_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..37476d620d3939d4da55093d27a22071eb3a9333
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _convert_weight_to_int4pack_cpu(const at::Tensor & self, int64_t innerKTiles);
+TORCH_API at::Tensor _convert_weight_to_int4pack_cuda(const at::Tensor & self, int64_t innerKTiles);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c5471e8b8e910a4a7062bef6212e1f5c0ef5653
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcmul_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcmul_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..828870361b20b215c4d575f82dd75c8343cdd44d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcmul_native.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_addcmul_Scalar_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_addcmul_scalar_slow(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1);
+TORCH_API void foreach_tensor_addcmul_scalar_slow_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_addcmul_scalar_cuda(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1);
+TORCH_API void foreach_tensor_addcmul_scalar_cuda_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1);
+TORCH_API void _foreach_addcmul_ScalarList_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_addcmul_scalarlist_slow(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_addcmul_scalarlist_slow_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_addcmul_scalarlist_cuda(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_addcmul_scalarlist_cuda_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_addcmul_Tensor_out(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_addcmul_tensor_slow(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars);
+TORCH_API void foreach_tensor_addcmul_tensor_slow_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_addcmul_tensor_cuda(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars);
+TORCH_API void foreach_tensor_addcmul_tensor_cuda_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0859f22c1bd775beac70f4d27609be489e6a20ee
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_clamp_min_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_clamp_min_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_clamp_min_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e365187a7d6a8513e9a1f9cbd8148d1de2f333e0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_reciprocal(at::TensorList self);
+TORCH_API void _foreach_reciprocal_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b261fc131cb71b27ac2448fac375de2f33c6c06
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_tanh {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tanh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tanh(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_tanh_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tanh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tanh_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_tanh_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tanh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce96b7837e0c7f7b854cbcfc270b5a3d2069c974
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_outf(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ace9c2d7cf78d35ce8b8922a697a07d279e5f026
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_sgd_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_sgd_outf(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out);
+TORCH_API ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_sgd_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_sgd_outf(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c4d546942f8608e8f91b149cc94fa4590dfc7799
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _gather_sparse_backward {
+  using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_gather_sparse_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a1c6f509077000535bc622334bc42b20f01c13c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _histogramdd_from_bin_cts_out(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out);
+TORCH_API at::Tensor _histogramdd(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..35c906a3224ae5c15f1b03f943ea8891b342a3ee
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded.h
new file mode 100644
index 0000000000000000000000000000000000000000..638abacb28036607d8ea6be8d96eb7282fd5fe52
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_from_padded_ops.h>
+
+namespace at {
+
+
+// aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
+inline at::Tensor _nested_from_padded(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213=false) {
+    return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213);
+}
+
+// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_from_padded_out(at::Tensor & out, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213=false) {
+    return at::_ops::_nested_from_padded_out::call(padded, cpu_nested_shape_example, fuse_transform_0213, out);
+}
+// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_from_padded_outf(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {
+    return at::_ops::_nested_from_padded_out::call(padded, cpu_nested_shape_example, fuse_transform_0213, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_and_nested_example.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_and_nested_example.h
new file mode 100644
index 0000000000000000000000000000000000000000..66ad828ef0c202f7b5c9effe7c2d51abb926c2a4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_and_nested_example.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_from_padded_and_nested_example_ops.h>
+
+namespace at {
+
+
+// aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
+inline at::Tensor _nested_from_padded_and_nested_example(const at::Tensor & padded, const at::Tensor & nt_example) {
+    return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example);
+}
+
+// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_from_padded_and_nested_example_out(at::Tensor & out, const at::Tensor & padded, const at::Tensor & nt_example) {
+    return at::_ops::_nested_from_padded_and_nested_example_out::call(padded, nt_example, out);
+}
+// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_from_padded_and_nested_example_outf(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {
+    return at::_ops::_nested_from_padded_and_nested_example_out::call(padded, nt_example, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca7754b6390232be7b4de7a501900444b5cc6000
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_sum_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::OptionalIntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_sum_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_available_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_available_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..da277eb8cda19949d51efb2af3c20594f9b887ba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_available_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nnpack_available {
+  using schema = bool ();
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nnpack_available")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nnpack_available() -> bool")
+  static bool call();
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim.h
new file mode 100644
index 0000000000000000000000000000000000000000..a864fb1672799a233cd12cde95297f01ba7e3cc8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_remove_batch_dim_ops.h>
+
+namespace at {
+
+
+// aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
+inline at::Tensor _remove_batch_dim(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
+    return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_ff.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_ff.h
new file mode 100644
index 0000000000000000000000000000000000000000..31f6d99f46272c090cbbf32f54ba2ee811bf1b06
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_ff.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sobol_engine_ff_ops.h>
+
+namespace at {
+
+
+// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
+inline at::Tensor & _sobol_engine_ff_(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
+    return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..90c9b73e02689ec145f196305bc143f58797362d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+TORCH_API at::Tensor & _softmax_backward_data_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+TORCH_API at::Tensor & _softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5b92060f7356366dfed412ac23cb841c6159872
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _test_check_tensor(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..b716162d690981abe0c6667c47b95fac92ebd659
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_thnn_fused_gru_cell_backward_ops.h>
+
+namespace at {
+
+
+// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
+    return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias);
+}
+
+// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
+    return at::_ops::_thnn_fused_gru_cell_backward_out::call(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
+}
+// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_outf(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
+    return at::_ops::_thnn_fused_gru_cell_backward_out::call(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8e047d2d31a3fad23b1f8e3191a15325d078cf8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_unique2_ops.h>
+
+namespace at {
+
+
+// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2(const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false) {
+    return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts);
+}
+
+// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false) {
+    return at::_ops::_unique2_out::call(self, sorted, return_inverse, return_counts, out0, out1, out2);
+}
+// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_outf(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::_unique2_out::call(self, sorted, return_inverse, return_counts, out0, out1, out2);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unpack_dual.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unpack_dual.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f61b659c9fd61d40b57a3e559955bfe76b42f49
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unpack_dual.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_unpack_dual_ops.h>
+
+namespace at {
+
+
+// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
+inline ::std::tuple<at::Tensor,at::Tensor> _unpack_dual(const at::Tensor & dual, int64_t level) {
+    return at::_ops::_unpack_dual::call(dual, level);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index.h
new file mode 100644
index 0000000000000000000000000000000000000000..736c9dd987c2cf1b58b4faeb3478f98ed7f66b76
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_unsafe_index_ops.h>
+
+namespace at {
+
+
+// aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
+inline at::Tensor _unsafe_index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
+    return at::_ops::_unsafe_index_Tensor::call(self, indices);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c345d9f8d1554826075eebc4f7aa21c62d68ca2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..935b184402a06f99844e2b8088dea574ec6ed0c1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & add_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4fa55718355377e7d15a2f762de3c9b6968d9cf7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor amin(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amin_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amin_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..13168d0d014e941b2aa9763c1cc484978f254df3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor as_strided_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
+TORCH_API at::Tensor as_strided_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bcb852633608c23608b99fb457032332ff03b85
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor batch_norm_backward_elemt(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f290700d4ee6984e8e8e16c678957736ee92c600
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor bincount(const at::Tensor & self, const c10::optional<at::Tensor> & weights={}, int64_t minlength=0);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f14e8031a4df14ba78dc10750c241defa62fdd42
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clone.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clone.h
new file mode 100644
index 0000000000000000000000000000000000000000..158d125b743c6ff90e4cfe42ed925176e3df1dc2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clone.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/clone_ops.h>
+
+namespace at {
+
+
+// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::clone::call(self, memory_format);
+}
+
+// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & clone_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::clone_out::call(self, memory_format, out);
+}
+// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & clone_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::clone_out::call(self, memory_format, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..705b2331d94b773e564c9161515abd5ba4afd915
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor & constant_pad_nd_out_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..869f138b1194711cbb7599d97d2900b5daaa7239
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API diagonal_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, int64_t, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2);
+};
+
+struct TORCH_API diagonal_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, int64_t, int64_t, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..61d9d6579f355db68aa6b82c644006fe22af8bb9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor erfinv(const at::Tensor & self);
+TORCH_API at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & erfinv_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d805f35c5dce9d3b7ecea9fcef08ec5d3e75711
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor flatten_dense_tensors(at::TensorList tensors); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor.h new file mode 100644 index 0000000000000000000000000000000000000000..91a5e249b2eaab5af25d079072c709b9ee8d998f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::floor(Tensor self) -> Tensor +inline at::Tensor floor(const at::Tensor & self) { + return at::_ops::floor::call(self); +} + +// aten::floor_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & floor_(at::Tensor & self) { + return at::_ops::floor_::call(self); +} + +// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::floor_out::call(self, out); +} +// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::floor_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_native.h new file mode 100644 index 0000000000000000000000000000000000000000..49aed94e12452bf3b12a74468114e2be1efe6e5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & fmod_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Scalar & other); +struct TORCH_API structured_fmod_out : public at::meta::structured_fmod_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4b77882d7756f03fd5ebf2b73fc5ef0ba0a25b2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
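+//
+// Illustrative usage (editor's sketch, not torchgen output): frac keeps the
+// fractional part of each element, so at::frac(t) equals t - at::trunc(t).
+//
+//   at::Tensor t = at::randn({4});
+//   at::Tensor f = at::frac(t);   // same sign as t, magnitude < 1
+//   at::frac_(t);                 // in-place variant declared below
+//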
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor frac(const at::Tensor & self); +TORCH_API at::Tensor & frac_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..62684b1ac058212095c6435e0c28ed1d72b32125 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..802b83632fb36510e1859ba333f67df84360298c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
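+//
+// Illustrative usage (editor's sketch, not torchgen output): igammac computes
+// the regularized upper incomplete gamma function, the complement of igamma,
+// so igammac(a, x) == 1 - igamma(a, x) elementwise. This header covers the
+// CUDA key, hence the device choice here.
+//
+//   at::Tensor a = at::full({3}, 2.0, at::kCUDA);
+//   at::Tensor x = at::full({3}, 1.5, at::kCUDA);
+//   at::Tensor q = at::igammac(a, x);
+//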
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference.h new file mode 100644 index 0000000000000000000000000000000000000000..61a0783697ce0ebee6ec99bed41dd983fb477623 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_inference(Tensor self) -> bool +inline bool __dispatch_is_inference(const at::Tensor & self) { + return at::_ops::is_inference::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg.h new file mode 100644 index 0000000000000000000000000000000000000000..f5fa66570c99c8ce555bdde5265126eb4b7503fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_neg(Tensor self) -> bool +inline bool __dispatch_is_neg(const at::Tensor & self) { + return at::_ops::is_neg::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e06a867a3f36a53486cbb1c50d2a9a72ab62924d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
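+//
+// Illustrative usage (editor's sketch, not torchgen output): is_set_to reports
+// whether two tensors share the same storage, storage offset, sizes, and
+// strides, which holds after Tensor::set_.
+//
+//   at::Tensor a = at::zeros({2, 3});
+//   at::Tensor b = at::empty({0});
+//   b.set_(a);                     // b now aliases a
+//   bool alias = b.is_set_to(a);   // true
+//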
+#include + +namespace at { + +namespace cpu { + +TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..20957f2a9243d04e0679c7bf5fd4ffe443570ac8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..b4d895e76770670a078c71ffeb0f9f7a915372b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_vector_norm : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f4d67377d710ba6fdb83e22773d4f79809e3abeb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
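+//
+// Illustrative usage (editor's sketch, not torchgen output): the out-of-place
+// masked_fill writes `value` wherever the boolean mask is true and copies
+// `self` elsewhere; Scalar and Tensor overloads of `value` are declared below.
+//
+//   at::Tensor x = at::arange(4).reshape({2, 2});    // {{0, 1}, {2, 3}}
+//   at::Tensor y = at::masked_fill(x, x.gt(1), -1);  // {{0, 1}, {-1, -1}}
+//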
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..467b39492e0dd5a5d2ffedacca3047a9d13e2857 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API masked_scatter_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_scatter_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6244dbc3d042b5d1a0f52d34db577ce73960f091 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
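+//
+// Illustrative usage (editor's sketch, not torchgen output): maximum is the
+// elementwise binary max with broadcasting; unlike the reduction at::max, it
+// returns a tensor shaped like the broadcast inputs and propagates NaNs.
+//
+//   at::Tensor a = at::full({3}, 1.0, at::kCUDA);
+//   at::Tensor b = at::full({3}, 2.0, at::kCUDA);
+//   at::Tensor m = at::maximum(a, b);   // all elements 2.0
+//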
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5f5d690fbef2c8ffc4b27e0dd37ebddda29766e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nextafter_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nextafter_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..966a835e28805362dd75061f8074ce1cdec65ac1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nextafter_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
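+//
+// Illustrative usage (editor's sketch, not torchgen output): nextafter returns
+// the next representable floating-point value after self in the direction of
+// other, mirroring std::nextafter elementwise.
+//
+//   at::Tensor from = at::zeros({1});
+//   at::Tensor to = at::ones({1});
+//   at::Tensor step = at::nextafter(from, to);  // smallest value above 0.0
+//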
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor nextafter(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & nextafter_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5be948361d64c577972d38d4146266963bb55b0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor one_hot(const at::Tensor & self, int64_t num_classes=-1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_native.h new file mode 100644 index 0000000000000000000000000000000000000000..aff962e441c52e61545a5eb7a349a01e7e1852c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims); +TORCH_API at::Tensor permute_sparse_coo(const at::Tensor & self, at::IntArrayRef dims); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8cc01dc7d8dfad0cda84a9143aad18dd8f86bcee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t high, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor & randint_like_out_symint(const at::Tensor & self, c10::SymInt high, c10::optional memory_format, at::Tensor & out); +TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor & 
randint_like_low_dtype_out_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional memory_format, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/relu.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/relu.h new file mode 100644 index 0000000000000000000000000000000000000000..cfe28ff41456abe095ce130889b56f387f153c44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/relu.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::relu(Tensor self) -> Tensor +inline at::Tensor relu(const at::Tensor & self) { + return at::_ops::relu::call(self); +} + +// aten::relu_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & relu_(at::Tensor & self) { + return at::_ops::relu_::call(self); +} + +// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & relu_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::relu_out::call(self, out); +} +// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & relu_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::relu_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave.h new file mode 100644 index 0000000000000000000000000000000000000000..36c47352ba6a66ee439a3a788f0e06f69be8dbee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave.h @@ -0,0 +1,135 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor +inline at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor::call(repeats, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); +} +namespace symint { + template ::value>> + at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor::call(repeats, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); + } +} + +// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor +inline at::Tensor repeat_interleave_symint(const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor::call(repeats, output_size); +} +namespace symint { + template ::value>> + at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor::call(repeats, output_size); + } +} + +// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? 
output_size=None) -> Tensor +inline at::Tensor repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); +} +namespace symint { + template ::value>> + at::Tensor repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); + } +} + +// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor +inline at::Tensor repeat_interleave_symint(const at::Tensor & self, const at::Tensor & repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size); +} +namespace symint { + template ::value>> + at::Tensor repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size); + } +} + +// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor +inline at::Tensor repeat_interleave(const at::Tensor & self, int64_t repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); +} +namespace symint { + template ::value>> + at::Tensor repeat_interleave(const at::Tensor & self, int64_t repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); + } +} + +// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor +inline at::Tensor repeat_interleave_symint(const at::Tensor & self, c10::SymInt repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size); +} +namespace symint { + template ::value>> + at::Tensor repeat_interleave(const at::Tensor & self, c10::SymInt repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size); + } +} + +// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & repeat_interleave_out(at::Tensor & out, const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt, out); +} +namespace symint { + template ::value>> + at::Tensor & repeat_interleave_out(at::Tensor & out, const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size.has_value() ? 
c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt, out); + } +} + +// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & repeat_interleave_outf(const at::Tensor & repeats, c10::optional output_size, at::Tensor & out) { + return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt, out); +} +namespace symint { + template ::value>> + at::Tensor & repeat_interleave_outf(const at::Tensor & repeats, c10::optional output_size, at::Tensor & out) { + return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt, out); + } +} + +// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & repeat_interleave_symint_out(at::Tensor & out, const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size, out); +} +namespace symint { + template ::value>> + at::Tensor & repeat_interleave_out(at::Tensor & out, const at::Tensor & repeats, c10::optional output_size=c10::nullopt) { + return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size, out); + } +} + +// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & repeat_interleave_symint_outf(const at::Tensor & repeats, c10::optional output_size, at::Tensor & out) { + return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size, out); +} +namespace symint { + template ::value>> + at::Tensor & repeat_interleave_outf(const at::Tensor & repeats, c10::optional output_size, at::Tensor & out) { + return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7aa4a8869b42c464189203e26cca32e2faf43fd6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
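+//
+// Editor's note (illustrative, not torchgen output): each struct in this file
+// is a stateless handle for one registered overload of aten::rnn_relu. The
+// public at::rnn_relu wrappers forward to `call`, while `redispatch` re-enters
+// the dispatcher with an explicit DispatchKeySet, e.g.:
+//
+//   auto [output, hy] = at::_ops::rnn_relu_input::call(
+//       input, hx, params, /*has_biases=*/true, /*num_layers=*/1,
+//       /*dropout=*/0.0, /*train=*/false, /*bidirectional=*/false,
+//       /*batch_first=*/true);
+//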
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API rnn_relu_input { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +}; + +struct TORCH_API rnn_relu_data { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "data") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_cell.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_cell.h new file mode 100644 index 0000000000000000000000000000000000000000..9a23f91eb743d32501e133785fb18fadfccccaa8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_cell.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor +inline at::Tensor rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional & b_ih={}, const c10::optional & b_hh={}) { + return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ae9c9959cb2ba8db74000c2bb08d145b7f766724 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor rsqrt(const at::Tensor & self); +TORCH_API at::Tensor & rsqrt_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & rsqrt_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & rsqrt_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4add03076f24177f3151d7d7b1b59f7d1dbbe9c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor sinc(const at::Tensor & self); +TORCH_API at::Tensor & sinc_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sinc_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sinc_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1f387b974ebc9a2ccddd2d74b2695cc48bfe2793 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API slow_conv_transpose2d_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, const c10::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::slow_conv_transpose2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out); +}; + +struct TORCH_API slow_conv_transpose2d { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, const c10::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::slow_conv_transpose2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a59b536a52deff0fd31418afe5f3b647bd2b10cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_i0e(const at::Tensor & self); +TORCH_API at::Tensor & special_i0e_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_i0e_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1.h new file mode 100644 index 0000000000000000000000000000000000000000..63fb75d65f8095f66e08eec571c9220ab54dc705 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_i1(Tensor self) -> Tensor +inline at::Tensor special_i1(const at::Tensor & self) { + return at::_ops::special_i1::call(self); +} + +// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_i1_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i1_out::call(self, out); +} +// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & special_i1_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i1_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..7717ae152309a35778bdd17af7ffb90ce95e9227 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_laguerre_polynomial_l : public TensorIteratorBase { + + + void meta(const at::Tensor & x, const at::Tensor & n); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0dbd2b384fcf5cac1193e2e87e1d7df0944eed44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor special_legendre_polynomial_p(const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_legendre_polynomial_p_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +TORCH_API at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..10a44435ce067e6adba001d7cb4196aabf99c6ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_log_ndtr : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1cf0632494db529e8da505da5b4491c05f83e253 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sub_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sub") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API sub_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sub") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API sub__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sub_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API sub_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sub") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +struct TORCH_API sub__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sub_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sub_.Scalar(Tensor(a!) 
self, Scalar other, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +struct TORCH_API sub_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sub") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/subtract_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/subtract_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d8b2c01bdbb22cc90a11d18f6bd8ddac119e434d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/subtract_ops.h @@ -0,0 +1,72 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API subtract_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API subtract_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API subtract__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API subtract_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +struct TORCH_API subtract__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract_.Scalar(Tensor(a!) 
self, Scalar other, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b84dd7d62e6b8f00b45c5de9dc37b1906d78da79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor tan(const at::Tensor & self); +TORCH_API at::Tensor & tan_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b3e1f7f8d37888d037869f3bcf8ee62ae9310ff4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
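+//
+// Illustrative usage (editor's sketch, not torchgen output): threshold keeps
+// elements strictly greater than `threshold` and replaces the rest with
+// `value` (relu is the special case where both are zero).
+//
+//   at::Tensor x = at::arange(-2.0, 3.0);        // {-2, -1, 0, 1, 2}
+//   at::Tensor y = at::threshold(x, 0.0, -7.0);  // {-7, -7, -7, 1, 2}
+//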
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+TORCH_API at::Tensor & threshold_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+TORCH_API at::Tensor & threshold_outf(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..e78f131e58fec111068aa42095c6bbcc219d2370
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_trunc : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..69a71fd21ebb8649b36a0159dc0cf8ee51136f51
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/upsample_nearest1d_backward_ops.h>
+
+namespace at {
+
+
+// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
+  }
+}
+
+// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
+  }
+}
+
+// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_nearest1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
+  }
+}
+
+// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_nearest1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
+  }
+}
+
+// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
+inline at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_nearest1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_nearest1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales);
+  }
+}
+
+// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
+inline at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..616a4d16c5c105c73a9342d049c768e27a549f54
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API view_as_real_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_as_real_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_as_real_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API view_as_real_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_as_real_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops