diff --git a/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c9e7f51f9c7a3c18fe5e44994d5f4ef839fb35a7
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a5638aee4e7acbc6aada21c989af84e5c6d5bcb32a225a33cfda8e75b8a9e88
+size 50332843
diff --git a/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1eee8a15ec9c08be338d1f51c6c73a69b9b04bcd
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:115d182ae4f5a46506939f68afefc71955c0c4bb1c379c9346ce6ac4d09a281c
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f6ed7859a7dc56b609a360489670f8d0566b8d49
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cfaec7b944210a8f8c540f85e81a5bd026aa31f69d55e15bb35b686f931552e
+size 33555533
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..30851adfca3dd02fada1706b62f57136fd144ddb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking=false);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0b55d220d60dbbf6b117a1691241deb44874fe2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_dim_arange(Tensor like, int dim) -> Tensor
+inline at::Tensor _dim_arange(const at::Tensor & like, int64_t dim) {
+    return at::_ops::_dim_arange::call(like, dim);
+}
+
+}
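Aside (not part of the diff): the generated Function.h wrappers such as the one above are the public C++ entry points of the ATen ops. A minimal usage sketch for at::_dim_arange, which by its schema produces the same values as arange over like.size(dim), assuming a standard libtorch build:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor like = at::zeros({3, 5});
  // _dim_arange(like, dim) yields 0..like.size(dim)-1.
  at::Tensor idx = at::_dim_arange(like, /*dim=*/1);
  std::cout << idx << "\n";  // values 0, 1, 2, 3, 4
  return 0;
}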
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a8d8def174aa87365e9a5dfdc70e9e126a2c3c7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_foreach_erf(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_erf(at::TensorList self) {
+    return at::_ops::_foreach_erf::call(self);
+}
+
+// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
+inline void _foreach_erf_(at::TensorList self) {
+    return at::_ops::_foreach_erf_::call(self);
+}
+
+// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_erf_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_erf_out::call(self, out);
+}
+// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_erf_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_erf_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_mul.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_mul.h
new file mode 100644
index 0000000000000000000000000000000000000000..d81139060f6801d1bcaccac91624429ce873f3be
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_mul.h
@@ -0,0 +1,101 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_mul_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_mul_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_mul__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_mul_List::call(self, other);
+}
+
+// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+inline void _foreach_mul_(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_mul__List::call(self, other);
+}
+
+// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_mul_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, const at::Tensor & other) {
+    return at::_ops::_foreach_mul_Tensor::call(self, other);
+}
+
+// aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
+inline void _foreach_mul_(at::TensorList self, const at::Tensor & other) {
+    return at::_ops::_foreach_mul__Tensor::call(self, other);
+}
+
+// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_mul_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_mul_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_mul_out(at::TensorList out, at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_mul_List_out::call(self, other, out);
+}
+// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_mul_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
+    return at::_ops::_foreach_mul_List_out::call(self, other, out);
+}
+
+// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_mul_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_mul_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
+}
+
+// aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_mul_out(at::TensorList out, at::TensorList self, const at::Tensor & other) {
+    return at::_ops::_foreach_mul_Tensor_out::call(self, other, out);
+}
+// aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_mul_outf(at::TensorList self, const at::Tensor & other, at::TensorList out) {
+    return at::_ops::_foreach_mul_Tensor_out::call(self, other, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2712134847d4fea3c58c91f02a0251ba627f2eb5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_neg(at::TensorList self);
+TORCH_API void _foreach_neg_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
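Aside (not part of the diff): a minimal sketch of how these generated foreach entry points are used from C++, assuming a standard libtorch build. The functional forms return new tensors, the trailing-underscore forms mutate the whole list in place, and the _out/_outf pairs differ only in where the out list sits.

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> xs = {at::ones({2, 2}), at::ones({3})};
  // Functional variant: one call over the whole list, returns new tensors.
  std::vector<at::Tensor> ys = at::_foreach_mul(xs, /*scalar=*/2.0);
  // In-place variant: negates every tensor in xs.
  at::_foreach_neg_(xs);
  return 0;
}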
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c84d75d4dcda847578f5b557b06ec8c5b98796a6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_ops.h
@@ -0,0 +1,127 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_pow_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList exponent);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent);
+};
+
+struct TORCH_API _foreach_pow_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & exponent);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent);
+};
+
+struct TORCH_API _foreach_pow_ScalarList {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+};
+
+struct TORCH_API _foreach_pow_ScalarAndTensor {
+  using schema = ::std::vector<at::Tensor> (const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarAndTensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(const at::Scalar & self, at::TensorList exponent);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, at::TensorList exponent);
+};
+
+struct TORCH_API _foreach_pow__List {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()")
+  static void call(at::TensorList self, at::TensorList exponent);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent);
+};
+
+struct TORCH_API _foreach_pow__Scalar {
+  using schema = void (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()")
+  static void call(at::TensorList self, const at::Scalar & exponent);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent);
+};
+
+struct TORCH_API _foreach_pow__ScalarList {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+};
+
+struct TORCH_API _foreach_pow_List_out {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList exponent, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent, at::TensorList out);
+};
+
+struct TORCH_API _foreach_pow_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & exponent, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent, at::TensorList out);
+};
+
+struct TORCH_API _foreach_pow_ScalarList_out {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_pow")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out);
+};
+
+}} // namespace at::_ops
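Aside (not part of the diff): each *_ops.h struct above carries the operator's schema string plus unboxed call/redispatch entry points; the inline wrappers in the matching Function.h headers simply forward to ::call. A sketch of invoking one directly, equivalent to calling at::_foreach_pow, assuming libtorch:

#include <ATen/ATen.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <vector>

int main() {
  std::vector<at::Tensor> xs = {at::rand({4})};
  // Same dispatcher path as at::_foreach_pow(xs, 2.0), spelled explicitly.
  std::vector<at::Tensor> ys = at::_ops::_foreach_pow_Scalar::call(xs, at::Scalar(2.0));
  return 0;
}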
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..aea23dc1929a5ea91a2b42b95225def4d47a6bb0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _grid_sampler_2d_cpu_fallback_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_grid_sampler_2d_cpu_fallback_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a2dc2314b0a528a382792c6d327abf41a4c1ab5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _masked_softmax_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out);
+TORCH_API at::Tensor masked_softmax_backward_cpu(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt);
+TORCH_API at::Tensor masked_softmax_backward_cuda(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3734362544f044bfd35fc618610383d8f56e64bc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _nested_tensor_from_mask_out(const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out);
+TORCH_API at::Tensor NestedTensor_nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check=true);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bdc637df3b05b4c2adc02a3251d2ab7563c7946e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _resize_output(const at::Tensor & self, at::IntArrayRef size, at::Device device);
+TORCH_API at::Tensor _resize_output_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device);
+TORCH_API const at::Tensor & _resize_output_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::Device device);
+TORCH_API const at::Tensor & _resize_output_outf(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out);
+TORCH_API const at::Tensor & _resize_output_symint_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device);
+TORCH_API const at::Tensor & _resize_output_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device, const at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b97f49cb9cb51e0b586a6acbcd406ec710406e2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, const c10::optional<at::Tensor> & dropout_mask={}, c10::optional<double> scale=c10::nullopt);
+} // namespace native
+} // namespace at
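Aside (not part of the diff): _scaled_dot_product_attention_math is the pure-ATen reference backend for scaled dot-product attention; callers normally go through the public op, which picks a backend (flash, memory-efficient, cuDNN, or this math fallback). A usage sketch, assuming libtorch:

#include <ATen/ATen.h>

int main() {
  // Shapes: (batch, heads, seq_len, head_dim).
  at::Tensor q = at::rand({1, 8, 16, 64});
  at::Tensor k = at::rand({1, 8, 16, 64});
  at::Tensor v = at::rand({1, 8, 16, 64});
  // Dispatches to one of the SDPA backends declared in these headers.
  at::Tensor out = at::scaled_dot_product_attention(q, k, v);
  return 0;
}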
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..94515a7aff9204e9d2075d768e6e646c1475a643
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _scaled_dot_product_cudnn_attention {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, bool, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_dot_product_cudnn_attention")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..75adea843af7b5ead4d528a33040e9b59dbb8757
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal=false, c10::optional<double> scale=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f9dcd4e1ed0ddf5157fc17f67554ef0cf9a6d69
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _test_autograd_multiple_dispatch_view_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_autograd_multiple_dispatch_view_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API _test_autograd_multiple_dispatch_view_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_autograd_multiple_dispatch_view_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d9d0139be0eb2077b1ac6ff83720b2f8093aed6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _to_sparse_semi_structured(const at::Tensor & dense); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bffa26fc3c83042fa141cd93f7155b43f8cd3774 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool1d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool1d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..73f537de500cb75126ac6b6d71df3a701346c7ac
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool1d_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API adaptive_avg_pool1d {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef output_size);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b8d828727fbac53b7bba1a82d13fa747e484c957
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addbmm_out(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a6ed35db5282768f2a909de99bd8285d09facb94
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3103fe5a32be8e4e1895d6679a4221332a63d61e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API batch_norm_update_stats {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, double);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::batch_norm_update_stats")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
+};
+
+struct TORCH_API batch_norm_update_stats_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, double, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::batch_norm_update_stats")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c25b55fdac53c4edcaf6f5deaac87735df89b18
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_bitwise_not_out : public at::meta::structured_bitwise_not {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c799ef5b6cf2a6c1a483e6ade2a4c9c0cae3845f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..079afb8c700195299e528c7dff33f4b2d897bec0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor col_indices_default(const at::Tensor & self);
+TORCH_API at::Tensor col_indices_sparse_csr(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4fdff8231a9b2b13ef553a6c3c0f28c941669ed6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
+TORCH_API at::Tensor & convolution_overrideable_out_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3867516ddcb4830284078f960ca3f885822b5c2c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor cosh(const at::Tensor & self);
+TORCH_API at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & cosh_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e06afdcd8cf7d593339d8966e79c4b4df8286f30
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor count_nonzero(const at::Tensor & self, at::IntArrayRef dim);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler.h
new file mode 100644
index 0000000000000000000000000000000000000000..965aacb337ac4823346ee8d1081f7a7d985b43ce
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
+inline at::Tensor cudnn_grid_sampler(const at::Tensor & self, const at::Tensor & grid) {
+    return at::_ops::cudnn_grid_sampler::call(self, grid);
+}
+
+// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cudnn_grid_sampler_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & grid) {
+    return at::_ops::cudnn_grid_sampler_out::call(self, grid, out);
+}
+// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cudnn_grid_sampler_outf(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
+    return at::_ops::cudnn_grid_sampler_out::call(self, grid, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/det.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/det.h
new file mode 100644
index 0000000000000000000000000000000000000000..14701e817162df7f89d8034e319fc288b7d6d11d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/det.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::det(Tensor self) -> Tensor
+inline at::Tensor det(const at::Tensor & self) {
+    return at::_ops::det::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagflat.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagflat.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5c1b570f012a0278738dcf3ed16670c07768ccc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagflat.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::diagflat(Tensor self, int offset=0) -> Tensor
+inline at::Tensor diagflat(const at::Tensor & self, int64_t offset=0) {
+    return at::_ops::diagflat::call(self, offset);
+}
+
+}
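Aside (not part of the diff): det and diagflat are typical single-overload Function.h wrappers, one inline function forwarding to the _ops struct. A usage sketch, assuming libtorch:

#include <ATen/ATen.h>

int main() {
  at::Tensor m = 2.0 * at::eye(3);
  at::Tensor d = at::det(m);                   // determinant of 2*I(3) is 8
  at::Tensor f = at::diagflat(at::arange(3));  // 3x3 matrix with 0, 1, 2 on the diagonal
  return 0;
}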
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ccec06658d996ab665d2ffbe4b1e6e146eca30a1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfftn_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfftn_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a340f2ed58761e587c443ef8a8e9e399921641a3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfftn_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fft_ihfftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API const at::Tensor & fft_ihfftn_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..76e27a330152a1ef74c221137f366df5bd66341d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & flip_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims);
+TORCH_API at::Tensor & flip_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
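Aside (not part of the diff): the generated _out/_outf pairs (seen here for flip, and below for fmin) bind the same kernel; _out takes the destination first, _outf takes it last. A sketch, assuming libtorch:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::arange(6).reshape({2, 3});
  at::Tensor out = at::empty_like(x);
  at::flip_out(out, x, /*dims=*/{0});   // destination-first spelling
  at::flip_outf(x, /*dims=*/{0}, out);  // destination-last spelling, same result
  return 0;
}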
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin.h
new file mode 100644
index 0000000000000000000000000000000000000000..b421e06a6b286494dc0d7e27b18fd31a6a831d1b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::fmin(Tensor self, Tensor other) -> Tensor
+inline at::Tensor fmin(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmin::call(self, other);
+}
+
+// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmin_out::call(self, other, out);
+}
+// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::fmin_out::call(self, other, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..aecabc96fcf5912830ef5db4c38de28527a30c63
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & grid_sampler_2d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+TORCH_API at::Tensor grid_sampler_2d_cpu(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+TORCH_API at::Tensor grid_sampler_2d_cuda(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2000de3bb92a9aa2b6041de337c094e21ec6963d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={});
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..89b92f062812b21c4a6e66d73a1ff294bf4a9c45
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardswish_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardswish") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API hardswish { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardswish") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardswish(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API hardswish_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardswish_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardswish_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f5077d9af8c0a3c7690d4d46247e0d169538321c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
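+// Usage sketch (illustrative, assuming `input` and `target` tensors of equal shape):
+// the schema below corresponds to the public C++ entry point
+//   at::Tensor loss = at::hinge_embedding_loss(input, target,
+//                                              /*margin=*/1.0,
+//                                              /*reduction=*/at::Reduction::Mean);
+// where `target` is expected to hold values in {1, -1}.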
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hinge_embedding_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hinge_embedding_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..748abba02340adc8369eb4136e2affc9ea6cb50a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_add_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_add") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API index_add_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_add_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_add_(Tensor(a!) 
self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +struct TORCH_API index_add { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_add") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +struct TORCH_API index_add_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_add") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h new file mode 100644 index 0000000000000000000000000000000000000000..02adf420b8f60128d326766f8c8b68bae7edc816 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..e23576d017fcd1f660020d8ac5327ff0b9f3cd38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_complex(Tensor self) -> bool +inline bool __dispatch_is_complex(const at::Tensor & self) { + return at::_ops::is_complex::call(self); +} + +} diff --git 
a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4e0fed016566596b6dfd1845c027137314a03009 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h @@ -0,0 +1,34 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bf7fbd1775bdbac01c42b2da7a7d30b1e3a0820a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_lcm_out : public at::meta::structured_lcm { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det.h new file mode 100644 index 
0000000000000000000000000000000000000000..4fd277012a00807f630a5e1944b63f1d21c75f99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_det(Tensor A) -> Tensor +inline at::Tensor linalg_det(const at::Tensor & A) { + return at::_ops::linalg_det::call(A); +} + +// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A) { + return at::_ops::linalg_det_out::call(A, out); +} +// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out) { + return at::_ops::linalg_det_out::call(A, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1bc9b581a0a6d19e3523246a6a11aaa72bdeb416 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor log(const at::Tensor & self); +TORCH_API at::Tensor & log_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dae4f1837d7696f7fdfbeab960a29fc9958df84b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
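+// Editor's note (illustrative): *_cuda_dispatch.h headers declare the entry points
+// registered for a single dispatch key. Calling at::cuda::logical_or_out directly
+// bypasses dispatching; portable code should prefer the dispatched API, e.g.
+//   at::Tensor r = at::logical_or(a, b);   // valid for CPU and CUDA inputs alike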
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & logical_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..dfe5eb3ca58f5968f2629fd1abd81eba1a7b88b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API logical_or { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_or") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_or(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_or_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_or_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_or_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_or") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_or.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..31eeef428664ed3588c8f0cb22aea6f5ef8e60bc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..81d81a29cc5352091e529087dd15f9253debbdaf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
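+// Editor's note (illustrative): in schema strings such as the ones below,
+// "Tensor(a!)" marks an argument that is mutated in place and aliases output (a),
+// and an ".out" overload returns its results through caller-provided buffers
+// (out0, out1, ...) instead of allocating fresh tensors.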
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API miopen_rnn_backward { + using schema = ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_rnn_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])") + static ::std::tuple> call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask); + static ::std::tuple> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask); +}; + +struct TORCH_API miopen_rnn_backward_out { + using schema = void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_rnn_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> ()") + static void call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..71a7d694452bc1598ba97d4808fcb8ee3ed53d86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor mish(const at::Tensor & self); +TORCH_API at::Tensor & mish_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & mish_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & mish_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mv.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mv.h new file mode 100644 index 0000000000000000000000000000000000000000..59d55dfa872f551b908877b0b66650e6c4bba562 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mv.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mv(Tensor self, Tensor vec) -> Tensor +inline at::Tensor mv(const at::Tensor & self, const at::Tensor & vec) { + return at::_ops::mv::call(self, vec); +} + +// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) 
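+// Usage sketch (illustrative editor's addition): mv is matrix-vector
+// multiplication, (n, m) @ (m) -> (n):
+//   at::Tensor A = at::rand({3, 4});
+//   at::Tensor x = at::rand({4});
+//   at::Tensor y = at::mv(A, x);   // shape {3}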
+inline at::Tensor & mv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec) { + return at::_ops::mv_out::call(self, vec, out); +} +// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mv_outf(const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) { + return at::_ops::mv_out::call(self, vec, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..be28c123efab9206c637ea22095cb6a9a22a2842 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & mvlgamma_out(at::Tensor & out, const at::Tensor & self, int64_t p); +TORCH_API at::Tensor & mvlgamma_outf(const at::Tensor & self, int64_t p, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8cac29066855d85ff335ae02608fde7ec841711b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); +TORCH_API at::Tensor narrow_nested_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); +TORCH_API at::Tensor narrow_tensor_symint(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..21effc0f3bb48d27f0463e9a51a6320b0c514875 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
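+// Editor's note (illustrative): native_group_norm is the kernel underlying group
+// normalization; it returns an (output, mean, rstd) tuple, with N/C/HxW describing
+// the flattened input layout and `group` the number of channel groups.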
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps); +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_symint(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_strided_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_strided_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..25ebc38120c7ab4b8c70c7021d1c2aad283e4678 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_strided_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}); +TORCH_API at::Tensor new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor new_empty_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}); +TORCH_API at::Tensor new_empty_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); + +} // namespace compositeexplicitautogradnonfunctional
} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_scales_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_scales_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d3038fce9c84272ca3dacf2878ef617c0fbc8c03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_scales_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & q_per_channel_scales_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor q_per_channel_scales(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/qr_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/qr_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1b69e399fbb719684c1636c710c889187f40e6e3 --- /dev/null +++
b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/qr_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> qr(const at::Tensor & self, bool some=true); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> qr_out(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/qscheme_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/qscheme_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..58ff85e666397529bcb954cc468d3a0076285d78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/qscheme_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API qscheme { + using schema = at::QScheme (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::qscheme") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "qscheme(Tensor self) -> QScheme") + static at::QScheme call(const at::Tensor & self); + static at::QScheme redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_native.h new file mode 100644 index 0000000000000000000000000000000000000000..42ae97a10bbb30d5b64f1078d50588a6d27f15f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like.h new file mode 100644 index 0000000000000000000000000000000000000000..288a4f1da7516623c9cee956e2b8f0090bab0ff0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like.h @@ -0,0 +1,43 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include
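+// Usage sketch (illustrative editor's addition): factory-like ops come in a
+// TensorOptions overload and a fully unpacked dtype/layout/device/pin_memory
+// overload, e.g.
+//   at::Tensor noise = at::rand_like(t);             // inherit t's options
+//   at::Tensor half  = at::rand_like(t, at::kHalf);  // override only the dtype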
+ +namespace at { + + +// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::rand_like::call(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); +} +// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor rand_like(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format); +} + +// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_like_out(at::Tensor & out, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::rand_like_out::call(self, memory_format, out); +} +// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_like_outf(const at::Tensor & self, c10::optional memory_format, at::Tensor & out) { + return at::_ops::rand_like_out::call(self, memory_format, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..03747d0a2c35195fa52942673371890d10566a94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +TORCH_API ::std::tuple rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/signbit.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/signbit.h new file mode 100644 index 0000000000000000000000000000000000000000..1174366e885e5a4b44110887a0da35c7c56f37b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/signbit.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::signbit(Tensor self) -> Tensor +inline at::Tensor signbit(const at::Tensor & self) { + return at::_ops::signbit::call(self); +} + +// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
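+// Usage sketch (illustrative): signbit tests the sign bit itself, so it is true
+// for -0.0 and for negative NaNs, unlike a plain `t < 0` comparison:
+//   at::Tensor t = at::tensor({-0.0, 1.0, -2.0});
+//   at::Tensor s = at::signbit(t);   // {true, false, true}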
+inline at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::signbit_out::call(self, out); +} +// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::signbit_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5857a620b802fee09cf49216dfa7afd1a9c70d09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sparse_csc_tensor_ccol_row_value_size { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_csc_tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ccol_row_value_size") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor") + static at::Tensor call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API sparse_csc_tensor_ccol_row_value { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_csc_tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ccol_row_value") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor") + static at::Tensor call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3d015efd6e68c7cc319766ad493734b940fb64b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sparse_resize_and_clear_ { + using schema = const at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize_and_clear_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +}; + +struct TORCH_API sparse_resize_and_clear_out { + using schema = const at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize_and_clear") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) 
out) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); +}; + +struct TORCH_API sparse_resize_and_clear { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize_and_clear") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e5a3447069dea6ab0215b4c99a01267fcdd2ee03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
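+// Usage sketch (illustrative editor's addition): special_hermite_polynomial_h
+// evaluates the physicists' Hermite polynomials H_n(x) elementwise, broadcasting
+// x against n:
+//   at::Tensor x = at::linspace(-1, 1, 5);
+//   at::Tensor h = at::special_hermite_polynomial_h(x, at::full({}, 3.0));  // H_3(x)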
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..6de4593cf0007f4aec805b0ad9b9539f829899b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_i0e : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7be1ba026f21fcd83b33a73e364ec3692408636b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sqrt { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sqrt") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sqrt(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API sqrt_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sqrt_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sqrt_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API sqrt_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sqrt") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sqrt.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1539e5e8740bca88c2504d7626decb22e3e7ad8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor sum(const at::Tensor & self, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & sum_outf(const at::Tensor & self, c10::optional dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9828d6c5f78695d549761100739ffba887929ae5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API c10::SymInt sym_storage_offset(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_native.h new file mode 100644 index 0000000000000000000000000000000000000000..14b8373fec80db07d31bfa672e9afc0b236df11f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor tensordot(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other); +TORCH_API at::Tensor & tensordot_out(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_compositeexplicitautogradnonfunctional_dispatch.h 
b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8c9deb6649bbb129b2c7f0f99a8398f0dd07ff09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); +TORCH_API at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9c8817c5667b9f0b77bcb9b3ee7e731947bfb9ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor to_mkldnn_backward(const at::Tensor & grad, const at::Tensor & input); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5a22c9c71441ac2cfb17298e96ddf6d95d1efd9f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API unfold { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unfold") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..69869b1051b4e9d97d01a9c1e6f21eb5027a359c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor +inline at::Tensor unsqueeze_copy(const at::Tensor & self, int64_t dim) { + return at::_ops::unsqueeze_copy::call(self, dim); +} + +// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & unsqueeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim) { + return at::_ops::unsqueeze_copy_out::call(self, dim, out); +} +// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & unsqueeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::unsqueeze_copy_out::call(self, dim, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..45b8eff02e8433a3c4114b0b360881ce8ecd4757 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input); +TORCH_API at::Tensor & upsample_bicubic2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy.h new file mode 100644 index 0000000000000000000000000000000000000000..7a376d765abe1a85414c6bfef9818fe3e9ad633b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy.h @@ -0,0 +1,77 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::xlogy_Tensor::call(self, other); +} + +// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor +inline at::Tensor xlogy(const at::Scalar & self, const at::Tensor & other) { + return at::_ops::xlogy_Scalar_Self::call(self, other); +} + +// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor +inline at::Tensor xlogy(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::xlogy_Scalar_Other::call(self, other); +} + +// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other) { + return at::_ops::xlogy__Tensor::call(self, other); +} + +// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & xlogy_(at::Tensor & self, const at::Scalar & other) { + return at::_ops::xlogy__Scalar_Other::call(self, other); +} + +// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::xlogy_OutTensor::call(self, other, out); +} +// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!) +inline at::Tensor & xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::xlogy_OutTensor::call(self, other, out); +} + +// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & xlogy_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::xlogy_OutScalar_Self::call(self, other, out); +} +// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & xlogy_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::xlogy_OutScalar_Self::call(self, other, out); +} + +// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::xlogy_OutScalar_Other::call(self, other, out); +} +// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & xlogy_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::xlogy_OutScalar_Other::call(self, other, out); +} + +}
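
The xlogy.h wrappers above show the calling convention that all of these generated Function.h headers share: a functional overload that allocates its result, an in-place `_` overload, and two spellings of each out-variant (`_out` with `out` leading, `_outf` with `out` trailing) that forward to the same underlying schema. A minimal sketch of exercising the four forms, assuming a libtorch build (the sample tensors and `main` scaffold are illustrative, not part of the headers):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({3});
  at::Tensor y = at::rand({3});

  // Functional: aten::xlogy.Tensor allocates and returns a fresh tensor.
  at::Tensor r = at::xlogy(x, y);

  // In-place: aten::xlogy_.Tensor mutates `x` and returns a reference to it.
  at::xlogy_(x, y);

  // Out variant, `_out` spelling: `out` leads, per the C++ API convention.
  at::Tensor out = at::empty_like(r);
  at::xlogy_out(out, r, y);

  // `_outf` spelling: identical aten::xlogy.OutTensor schema, but `out`
  // trails, mirroring the argument order in the schema string itself.
  at::xlogy_outf(r, y, out);

  return 0;
}
```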
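
Similarly, the Operator.h headers (sqrt_ops.h, unfold_ops.h) declare one `at::_ops` struct per overload, each carrying the schema metadata plus static `call` and `redispatch` entry points; the public `at::` wrappers are thin forwards to `call`. A hedged sketch of that equivalence for `sqrt`, under the same libtorch assumption:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::rand({4});

  // Public C++ API entry point.
  at::Tensor a = at::sqrt(t);

  // The generated struct's `call` routes through the dispatcher using the
  // registered "aten::sqrt" schema, so it produces the same result.
  at::Tensor b = at::_ops::sqrt::call(t);

  return a.equal(b) ? 0 : 1;
}
```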