diff --git a/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e6a1b306ae9e1a61c4f04d761daadae416f9426
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63afb1725252102af0d44a32201d9adc57308a0cc664df747afbb06353ded103
+size 50332828
diff --git a/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c5c5e2a80bde36b94bc3cc2c58b156aca9ad4c31
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6fc2446f23e5b2188303f46dd42a90e97d53c6a7ecd39d7e64f27272c0dbace
+size 50332749
diff --git a/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1b28b150edf65481e3e503f39aea30e0979f74ee
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57952091de2a0c3ea99f3aba93eb66382c80348caf99cbfe50349f28173d6703
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2f12b5cb2c0f8a3b81f3ee46f3306a0a83c7efab
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f23472630b9cab8bc0802e68f8dd357231655171f65e83bb761c9439b7e0302
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1eb03c0f3efef663cfab79db4136303e00979c5f
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cae3d41ac684e116eddff628753c6320266e711f6169cd292093198e04df9f6d
+size 16778411
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dacd554c9832d5cd9c2ea42f0099d911bba1e743
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _cdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+TORCH_API at::Tensor & _cdist_backward_outf(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat.h
new file mode 100644
index 0000000000000000000000000000000000000000..4263b4bf3ec2d9c090ffea6f9a4dbb75bc2530f9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_chunk_cat_ops.h>
+
+namespace at {
+
+
+// aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor
+inline at::Tensor _chunk_cat(at::TensorList tensors, int64_t dim, int64_t num_chunks) {
+    return at::_ops::_chunk_cat::call(tensors, dim, num_chunks);
+}
+
+// aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _chunk_cat_out(at::Tensor & out, at::TensorList tensors, int64_t dim, int64_t num_chunks) {
+    return at::_ops::_chunk_cat_out::call(tensors, dim, num_chunks, out);
+}
+// aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _chunk_cat_outf(at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out) {
+    return at::_ops::_chunk_cat_out::call(tensors, dim, num_chunks, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac9b43c4d558c1c75edace56471c93e954e4ad32
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API int64_t _cslt_sparse_mm_search(const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional<at::Tensor> & bias={}, const c10::optional<at::Tensor> & alpha={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, bool transpose_result=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac7215b46c1483f171584b48691d6794434e3e3f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _cufft_get_plan_cache_size(at::DeviceIndex device_index);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1328a65a554e012a2ac6a82b341556e68c12ad00
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_acos {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_acos_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_acos_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lgamma_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lgamma_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5f03c4ef94d7002fdc73406e1d96b710e646f810
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lgamma_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_lgamma_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_lgamma_slow(at::TensorList self);
+TORCH_API void foreach_tensor_lgamma_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_lgamma_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_lgamma_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow.h
new file mode 100644
index 0000000000000000000000000000000000000000..a72477c6b9fbeca49a5177270393842644b2573f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow.h
@@ -0,0 +1,87 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_pow_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, at::TensorList exponent) {
+    return at::_ops::_foreach_pow_List::call(self, exponent);
+}
+
+// aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, const at::Scalar & exponent) {
+    return at::_ops::_foreach_pow_Scalar::call(self, exponent);
+}
+
+// aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
+    return at::_ops::_foreach_pow_ScalarList::call(self, exponent);
+}
+
+// aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_pow(const at::Scalar & self, at::TensorList exponent) {
+    return at::_ops::_foreach_pow_ScalarAndTensor::call(self, exponent);
+}
+
+// aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
+inline void _foreach_pow_(at::TensorList self, at::TensorList exponent) {
+    return at::_ops::_foreach_pow__List::call(self, exponent);
+}
+
+// aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
+inline void _foreach_pow_(at::TensorList self, const at::Scalar & exponent) {
+    return at::_ops::_foreach_pow__Scalar::call(self, exponent);
+}
+
+// aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
+inline void _foreach_pow_(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
+    return at::_ops::_foreach_pow__ScalarList::call(self, exponent);
+}
+
+// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
+inline void _foreach_pow_out(at::TensorList out, at::TensorList self, at::TensorList exponent) {
+    return at::_ops::_foreach_pow_List_out::call(self, exponent, out);
+}
+// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
+inline void _foreach_pow_outf(at::TensorList self, at::TensorList exponent, at::TensorList out) {
+    return at::_ops::_foreach_pow_List_out::call(self, exponent, out);
+}
+
+// aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
+inline void _foreach_pow_out(at::TensorList out, at::TensorList self, const at::Scalar & exponent) {
+    return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out);
+}
+// aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
+inline void _foreach_pow_outf(at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
+    return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out);
+}
+
+// aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
+inline void _foreach_pow_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
+    return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out);
+}
+// aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
+inline void _foreach_pow_outf(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
+    return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5f5f11d11d865a74f41fdbd495c6f1845f7e1a6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_reciprocal_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_reciprocal_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data.h
new file mode 100644
index 0000000000000000000000000000000000000000..c41dbb63da4e2a0ceefc75e35dc5e039f01f7236
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_log_softmax_backward_data_ops.h>
+
+namespace at {
+
+
+// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
+inline at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
+    return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype);
+}
+
+// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
+    return at::_ops::_log_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, out);
+}
+// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
+    return at::_ops::_log_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..32cf6972032e77a3f02b39af43dcbe9069f11166
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _neg_view(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..af433a35d395d27da531129556446d63c3a0469a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_sum_backward_ops.h>
+
+namespace at {
+
+
+// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
+inline at::Tensor _nested_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) {
+    return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..32a10fc8abeb4a3086df473dfb462e5701c4fc74
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_tensor_softmax_with_shape {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_softmax_with_shape")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & query);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & query);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz.h
new file mode 100644
index 0000000000000000000000000000000000000000..50063e91a7645063e251c06062ba41859892586e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nnz_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d2307a1b2052fe98f504126758d8ce5b214af89
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_reshape_alias_ops.h>
+
+namespace at {
+
+
+// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+inline at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
+    return at::_ops::_reshape_alias::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
+    return at::_ops::_reshape_alias::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
+  }
+}
+
+// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+inline at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+    return at::_ops::_reshape_alias::call(self, size, stride);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _reshape_alias(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+    return at::_ops::_reshape_alias::call(self, size, stride);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3de95359cfdc4ab81eac2643ba64e8db36400f7b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API const at::Tensor & _resize_output_(const at::Tensor & self, at::IntArrayRef size, at::Device device);
+TORCH_API const at::Tensor & _resize_output__symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state.h
new file mode 100644
index 0000000000000000000000000000000000000000..385a33ea851b7d5372ed01434be6719f5e610788
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sobol_engine_initialize_state_ops.h>
+
+namespace at {
+
+
+// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
+inline at::Tensor & _sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension) {
+    return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e37de508b4b2edabfd32b89effe0b260e7c05858
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _test_warn_in_autograd(const at::Tensor & self);
+TORCH_API at::Tensor & _test_warn_in_autograd_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & _test_warn_in_autograd_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..62f812925f15c4ff5556f1bb3f7eb648df15a61e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _to_dense_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad, at::Tensor & out);
+TORCH_API at::Tensor sparse_to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt);
+TORCH_API at::Tensor sparse_compressed_to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt);
+TORCH_API at::Tensor mkldnn_to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..902d04907b0b48bb3bbc0a2c675512257defc50e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _to_sparse_out(at::Tensor & out, const at::Tensor & self, int64_t sparse_dim);
+TORCH_API at::Tensor & _to_sparse_outf(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out);
+TORCH_API at::Tensor & _to_sparse_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt);
+TORCH_API at::Tensor & _to_sparse_outf(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_version_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_version_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6daa50be4658317a28f6cde9ed32804da3f762f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_version_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _version(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..19743d58631af08944eef29a372472e2a180a648
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6fa8df02b4b8034b1b8720a7b5ef2aa091931e9f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_native.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/addmm_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_addmm_out_cpu : public at::meta::structured_addmm {
+void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, const at::Tensor & out);
+};
+struct TORCH_API structured_addmm_out_cuda : public at::meta::structured_addmm {
+void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, const at::Tensor & out);
+};
+TORCH_API at::Tensor addmm_sparse_dense_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_out_sparse_dense_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & s_addmm_sparse_dense_cpu_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor addmm_sparse_dense_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_out_sparse_dense_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & s_addmm_sparse_dense_cuda_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor addmm_sparse_compressed_dense(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_out_sparse_compressed_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addmm_out_sparse_compressed_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a08ca6217cf5350103697dfe05fc07552ec38752
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API affine_grid_generator {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::affine_grid_generator")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor")
+  static at::Tensor call(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners);
+};
+
+struct TORCH_API affine_grid_generator_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::affine_grid_generator")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amax_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amax_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..15333c3c84b3b1c4ecade8802c529033ae9a7697
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amax_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor amax(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amax_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amax_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e852c7a20d1c2589fb8f53615080279e7ec581c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_cpu(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_cuda(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..603471784ac99a5a451b8f88e229d35cea875873
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API binary_cross_entropy_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
+};
+
+struct TORCH_API binary_cross_entropy_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..59467da0cd481e6144608abd6072cd24f4fa6bb4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API bitwise_not {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_not")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_not(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API bitwise_not_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_not_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_not_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API bitwise_not_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_not")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f4613a6859dc0e3752f3cbece5267b16bbee823
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor ceil(const at::Tensor & self);
+TORCH_API at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & ceil_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp.h
new file mode 100644
index 0000000000000000000000000000000000000000..6433808925b5a64cc4d79734bc2471a655c4292d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp.h
@@ -0,0 +1,63 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/clamp_ops.h>
+
+namespace at {
+
+
+// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
+inline at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
+    return at::_ops::clamp::call(self, min, max);
+}
+
+// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
+inline at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
+    return at::_ops::clamp_Tensor::call(self, min, max);
+}
+
+// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
+inline at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
+    return at::_ops::clamp_::call(self, min, max);
+}
+
+// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
+inline at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
+    return at::_ops::clamp__Tensor::call(self, min, max);
+}
+
+// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
+    return at::_ops::clamp_out::call(self, min, max, out);
+}
+// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
+    return at::_ops::clamp_out::call(self, min, max, out);
+}
+
+// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
+    return at::_ops::clamp_Tensor_out::call(self, min, max, out);
+}
+// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
+    return at::_ops::clamp_Tensor_out::call(self, min, max, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c1048f3444ee191275a53ec76b656c8f4f4f326
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
+TORCH_API at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
+TORCH_API at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
+TORCH_API at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce.h
new file mode 100644
index 0000000000000000000000000000000000000000..924128f2c96bdfebd5f14ed4610d7cd40b7889a3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/coalesce_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a647d1850f7008f6c9090aac95773998bd935de
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & conv_depthwise3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation);
+TORCH_API at::Tensor & conv_depthwise3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out);
+TORCH_API at::Tensor & conv_depthwise3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation);
+TORCH_API at::Tensor & conv_depthwise3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos.h
new file mode 100644
index 0000000000000000000000000000000000000000..eb25f79f39f70667dac828f4546371bcbb5ce413
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cos_ops.h>
+
+namespace at {
+
+
+// aten::cos(Tensor self) -> Tensor
+inline at::Tensor cos(const at::Tensor & self) {
+    return at::_ops::cos::call(self);
+}
+
+// aten::cos_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & cos_(at::Tensor & self) {
+    return at::_ops::cos_::call(self);
+}
+
+// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::cos_out::call(self, out);
+}
+// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::cos_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..58b004e9b0fd60927dc92e7facf1d7050a2dd770
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_cos : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..47334abeab7085f04f187e23d343fa4be54c47b0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim=1, double eps=1e-08);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a6d41c754e38b54ff0cc9275db35eb7fecdf69d0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & cudnn_grid_sampler_out(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out);
+TORCH_API at::Tensor cudnn_grid_sampler_forward(const at::Tensor & self, const at::Tensor & grid);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diff.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diff.h
new file mode 100644
index 0000000000000000000000000000000000000000..129f4831768c2f9131cd3d1fd8c2286b071f751e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diff.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/diff_ops.h>
+
+namespace at {
+
+
+// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
+inline at::Tensor diff(const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional<at::Tensor> & prepend={}, const c10::optional<at::Tensor> & append={}) {
+    return at::_ops::diff::call(self, n, dim, prepend, append);
+}
+
+// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diff_out(at::Tensor & out, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional<at::Tensor> & prepend={}, const c10::optional<at::Tensor> & append={}) {
+    return at::_ops::diff_out::call(self, n, dim, prepend, append, out);
+}
+// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diff_outf(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append, at::Tensor & out) { + return at::_ops::diff_out::call(self, n, dim, prepend, append, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d51667baddde7e77e3ca3a69e9da9ed6adb86452 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor embedding_backward_symint(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4a0b82a7fa315eb6e8bba6fea18731d1c7bc590b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API expand { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::expand") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f03df4121236d2c2cf9e3f339803c422cf3149d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fft_fftfreq {
+  using schema = at::Tensor (int64_t, double, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_fftfreq")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API fft_fftfreq_out {
+  using schema = at::Tensor & (int64_t, double, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_fftfreq")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(int64_t n, double d, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/full.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/full.h
new file mode 100644
index 0000000000000000000000000000000000000000..8be99f2fb1f1fde0f060c4ac84a1c45bb879c0d9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/full.h
@@ -0,0 +1,131 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/full_ops.h>
+
+namespace at {
+
+
+// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+    return at::_ops::full_names::call(size, fill_value, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::full_names::call(size, fill_value, names, dtype, layout, device, pin_memory);
+}
+
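+// Hedged illustration (added for exposition; torchgen does not emit it): the
+// two overloads above differ only in whether tensor options arrive bundled as
+// at::TensorOptions or unpacked into four optionals. A hypothetical helper
+// showing the bundled form with unnamed dimensions:
+inline at::Tensor full_names_usage_sketch_() {
+  // 2x3 float tensor filled with 7; names = nullopt means no dimension names
+  return at::full({2, 3}, 7, c10::nullopt, at::TensorOptions().dtype(at::kFloat));
+}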
+// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+    return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+    return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+    return at::_ops::full::call(size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+    return at::_ops::full::call(size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
+    return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
+    return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+  }
+}
+
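+// Hedged illustration (not torchgen output): the out-variants fill an existing
+// tensor. `full_out` takes `out` first, while the `full_outf` spelling that
+// follows takes it last; both dispatch to at::_ops::full_out.
+inline at::Tensor & full_out_usage_sketch_(at::Tensor & out) {  // hypothetical helper
+  return at::full_out(out, {2, 2}, 1.5);
+}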
+// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+    return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+    return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+  }
+}
+
+// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
+    return at::_ops::full_out::call(size, fill_value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & full_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
+    return at::_ops::full_out::call(size, fill_value, out);
+  }
+}
+
+// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+    return at::_ops::full_out::call(size, fill_value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & full_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+    return at::_ops::full_out::call(size, fill_value, out);
+  }
+}
+
+// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names) {
+    return at::_ops::full_names_out::call(size, fill_value, names, out);
+}
+// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
+    return at::_ops::full_names_out::call(size, fill_value, names, out);
+}
+
+}
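A minimal standalone sketch of the symint calling convention from full.h above; illustrative only, assuming an ordinary libtorch build where concrete integers promote implicitly to c10::SymInt:

#include <ATen/ATen.h>
#include <vector>

int main() {
  // The _symint entry points take symbolic sizes; eager code can still pass
  // plain integers because c10::SymInt is implicitly constructible from them.
  std::vector<c10::SymInt> sizes{2, 3};
  at::Tensor t = at::full_symint(sizes, 0.5);
  return t.numel() == 6 ? 0 : 1;
}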
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..87d3f521d623b05e3ae74ba8e93887ccd05a1831
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1abc1ed59555bb1223fa9b4fa14beb50b2da6cc0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
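A small self-contained sketch of the igamma entry points declared above (illustrative; at::igamma computes the regularized lower incomplete gamma function elementwise, and the out= form reuses preallocated storage):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::ones({3});
  at::Tensor x = at::arange(1, 4, at::kFloat);
  at::Tensor p = at::igamma(a, x);    // functional form
  at::Tensor out = at::empty_like(p);
  at::igamma_out(out, a, x);          // out= form, same values
  return at::allclose(p, out) ? 0 : 1;
}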
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9cdebee676db088e052c2c9d2f4c66ef96189cb6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc3b571d5fbb044b52732fb8b10f58c74fe03147
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta.h
@@ -0,0 +1,32 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_lt_Scalar : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Scalar & other);
+};
+struct TORCH_API structured_lt_Tensor : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace meta
+} // namespace at
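The structured_lt_* meta declarations above only establish output shape and dtype; the kernels sit behind the public at::lt entry points. A hedged sketch of that surface (illustrative only):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(1, 4, at::kFloat);       // {1, 2, 3}
  // Scalar and Tensor overloads mirror structured_lt_Scalar / structured_lt_Tensor;
  // both produce a boolean tensor of the broadcast shape.
  at::Tensor m1 = at::lt(t, 2.0);                    // {true, false, false}
  at::Tensor m2 = at::lt(t, at::full_like(t, 2.0));
  return at::equal(m1, m2) ? 0 : 1;
}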
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb106bc85639df0a72a06927a60a95d2d0990068
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..53fe9cae863b0154723b72aa52a87cea554c5493
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..24efbc157c4a474569e90b9c1241265e267d5efd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor max_unpool3d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & max_unpool3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..13a50f76303ba1b336f82ecb4e3071506ced7088 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor minimum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & minimum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & minimum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3352a9f373be6eae8a3726a156a8d6b99f4872a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & mse_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & mse_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ae7cd05e94e3df0d80cf58b6f16fc7038ae27f71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor multilabel_margin_loss_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target); +TORCH_API at::Tensor & multilabel_margin_loss_backward_cpu_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input); +TORCH_API at::Tensor multilabel_margin_loss_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target); +TORCH_API at::Tensor & multilabel_margin_loss_backward_cuda_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e34700f1814d5a24915374e6177d0cb54e7819f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor nan_to_num(const at::Tensor & self, c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt); +TORCH_API at::Tensor & nan_to_num_(at::Tensor & self, c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt); +TORCH_API at::Tensor & nan_to_num_out(const at::Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf, at::Tensor & out); +TORCH_API at::Tensor nan_to_num_sparse(const at::Tensor & self, c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt); +TORCH_API at::Tensor & nan_to_num_sparse_out(const at::Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf, at::Tensor & out); +TORCH_API at::Tensor & nan_to_num_sparse_(at::Tensor & self, c10::optional nan=c10::nullopt, c10::optional 
posinf=c10::nullopt, c10::optional neginf=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8918e5c22c1e63b7ee57c56d3e5163a7c672252e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor norm_except_dim(const at::Tensor & v, int64_t pow=2, int64_t dim=0); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse.h new file mode 100644 index 0000000000000000000000000000000000000000..d0aef083294b90db38b0067ec528382c476bfa0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor +inline at::Tensor pinverse(const at::Tensor & self, double rcond=1e-15) { + return at::_ops::pinverse::call(self, rcond); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9d93d6aee86bcf5ce3af0ee3a5168ddc116d5c1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor rand_like(const at::Tensor & self, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor & rand_like_out(const at::Tensor & self, c10::optional memory_format, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6e046476e70837f951a611b7a2d7b89c4ded5b9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +TORCH_API at::Tensor & reflection_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f9f4522c159a31a7738da8a98282b609341b0da8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_cuda_dispatch.h @@ -0,0 +1,38 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out); +TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce); +TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce); +TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out); +TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce); +TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce); +TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce); +TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out); +TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_copy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9179e7e3e4599013de49c723c8838e9d9a72e85c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_copy_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & select_copy_int_out_symint(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out); +TORCH_API at::Tensor select_copy_sparse_csr(const at::Tensor & self, int64_t dim, int64_t index); +TORCH_API at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_backward_meta_dispatch.h 
b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba92dc0c314e812358d21ef4b3feec5b9774f356
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_backward_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output);
+TORCH_API at::Tensor & sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output);
+TORCH_API at::Tensor & sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
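A compact numerical check (illustrative, not part of the patch) of what sigmoid_backward computes: grad_input = grad_output * output * (1 - output), with output = sigmoid(input):

#include <ATen/ATen.h>

int main() {
  at::Tensor y = at::sigmoid(at::randn({4}));   // forward output
  at::Tensor go = at::ones({4});                // upstream gradient
  at::Tensor gi = at::sigmoid_backward(go, y);
  // d sigmoid(x) / dx = y * (1 - y)
  return at::allclose(gi, go * y * (1 - y)) ? 0 : 1;
}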
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9cad570f44b4cd8cb3c9cb8310823fe71e65076c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd);
+TORCH_API at::Tensor & softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd);
+TORCH_API at::Tensor & softshrink_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csr_tensor_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csr_tensor_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d8778a2471f818e7b8d9001dc0bc9c57fe0a74e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csr_tensor_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options);
+TORCH_API at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options);
+TORCH_API at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..3817f99e24d5d62f82db545e7716da369c489f20
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_airy_ai : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & x);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cuda_dispatch.h
new file mode 100644 index
0000000000000000000000000000000000000000..1067bdad16589995b51a1e6b3c5ad2024fbfb766 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_bessel_j0(const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j0_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammaln_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammaln_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e6b0b4acbb2e09718827e0fd0077b5fe0fb47921 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammaln_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_gammaln { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_gammaln") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_gammaln(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_gammaln_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_gammaln") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_gammaln.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..04aedbe51b8f90414698254408cc241fcd796460 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_laguerre_polynomial_l_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6cd978f1f3cb2d3f9bc5b85cbe32d2c8edda7f54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtr_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..34ff507bf09b82a9c8a0b6d4ae5df7ecc3ef49a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtr_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_ndtr { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_ndtr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_ndtr(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_ndtr_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_ndtr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_ndtr.out(Tensor self, *, Tensor(a!) 
+ out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e1bbb987bfee8db81de231a8fa337d589324134
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/split_copy_ops.h>
+
+namespace at {
+
+
+// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
+inline ::std::vector<at::Tensor> split_copy(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+    return at::_ops::split_copy_Tensor::call(self, split_size, dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::vector<at::Tensor> split_copy(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+    return at::_ops::split_copy_Tensor::call(self, split_size, dim);
+  }
+}
+
+// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
+inline ::std::vector<at::Tensor> split_copy_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
+    return at::_ops::split_copy_Tensor::call(self, split_size, dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::vector<at::Tensor> split_copy(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
+    return at::_ops::split_copy_Tensor::call(self, split_size, dim);
+  }
+}
+
+// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+inline void split_copy_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+    return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  void split_copy_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+    return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
+  }
+}
+
+// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+inline void split_copy_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
+    return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  void split_copy_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
+    return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
+  }
+}
+
+// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+inline void split_copy_symint_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
+    return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  void split_copy_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
+    return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
+  }
+}
+
+// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+inline void split_copy_symint_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out)
{ + return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out); +} +namespace symint { + template ::value>> + void split_copy_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { + return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_numel_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_numel_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..91543c41d42df90d8098dd1c920ad0667ca27496 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_numel_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sym_numel { + using schema = c10::SymInt (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sym_numel") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sym_numel(Tensor self) -> SymInt") + static c10::SymInt call(const at::Tensor & self); + static c10::SymInt redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e85629ac48f0b53748b405fe6394105cb9e5cc23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API take_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::take") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "take.out(Tensor self, Tensor index, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & index, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, at::Tensor & out); +}; + +struct TORCH_API take { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::take") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "take(Tensor self, Tensor index) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc.h new file mode 100644 index 0000000000000000000000000000000000000000..3d267f62fdc92a763ffcbd8f110510fe6598b87c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trace_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trace_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5160db093adeedbdbbe7f6fd6e76d24c223a19a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trace_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API trace { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trace") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trace(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API trace_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trace") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trace.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..4a8e83dad4c828e54b0d237d2b71e2ab8e2b5fbd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor +inline at::Tensor unfold_copy(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + return at::_ops::unfold_copy::call(self, dimension, size, step); +} + +// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & unfold_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + return at::_ops::unfold_copy_out::call(self, dimension, size, step, out); +} +// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & unfold_copy_outf(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) { + return at::_ops::unfold_copy_out::call(self, dimension, size, step, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d.h new file mode 100644 index 0000000000000000000000000000000000000000..dadf5b7562bb50a4a73e2d7e9358eb26581b108c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d.h @@ -0,0 +1,113 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor +inline at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_linear1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors); +} +namespace symint { + template ::value>> + at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_linear1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors); + } +} + +// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor +inline at::Tensor upsample_linear1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors); +} +namespace symint { + template ::value>> + at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors); + } +} + +// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out); +} +namespace symint { + template ::value>> + at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out); + } +} + +// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales, at::Tensor & out) { + return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out); +} +namespace symint { + template ::value>> + at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales, at::Tensor & out) { + return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out); + } +} + +// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & upsample_linear1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out); +} +namespace symint { + template ::value>> + at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) { + return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out); + } +} + +// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) 
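trace_ops.h declares the functional and out= entry points for aten::trace, while unfold_copy.h exposes the inline wrappers that forward to at::_ops::unfold_copy. A brief usage sketch of the public overloads, assuming a standard libtorch build (values chosen for illustration):

#include <ATen/ATen.h>

int main() {
  // trace(Tensor self) -> Tensor: sum of the main diagonal of a 2-D matrix.
  at::Tensor m = at::eye(3);
  at::Tensor t = at::trace(m);                     // 0-dim tensor holding 3.0

  // trace.out overload, writing into a preallocated 0-dim tensor.
  at::Tensor out = at::empty({}, m.options());
  at::trace_outf(m, out);

  // unfold_copy: like Tensor::unfold, but materializes a copy instead of
  // returning a view; here, windows of size 2 with step 1 along dim 0.
  at::Tensor x = at::arange(5);
  at::Tensor windows = at::unfold_copy(x, /*dimension=*/0, /*size=*/2, /*step=*/1);  // shape [4, 2]
  (void)t; (void)windows;
  return 0;
}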
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d.h
new file mode 100644
index 0000000000000000000000000000000000000000..dadf5b7562bb50a4a73e2d7e9358eb26581b108c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+inline at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) {
+    return at::_ops::upsample_linear1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
+}
+namespace symint {
+  template ::value>>
+  at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) {
+    return at::_ops::upsample_linear1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
+  }
+}
+
+// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+inline at::Tensor upsample_linear1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) {
+    return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
+}
+namespace symint {
+  template ::value>>
+  at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) {
+    return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
+  }
+}
+
+// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out);
+}
+namespace symint {
+  template ::value>>
+  at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out);
+  }
+}
+
+// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales, at::Tensor & out) {
+    return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out);
+}
+namespace symint {
+  template ::value>>
+  at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales, at::Tensor & out) {
+    return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out);
+  }
+}
+
+// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out);
+}
+namespace symint {
+  template ::value>>
+  at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out);
+  }
+}
+
+// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales, at::Tensor & out) {
+    return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out);
+}
+namespace symint {
+  template ::value>>
+  at::Tensor & upsample_linear1d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales, at::Tensor & out) {
+    return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out);
+  }
+}
+
+// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
+inline at::Tensor upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales);
+}
+namespace symint {
+  template ::value>>
+  at::Tensor upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales);
+  }
+}
+
+// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
+inline at::Tensor upsample_linear1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
+}
+namespace symint {
+  template ::value>>
+  at::Tensor upsample_linear1d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..741fae22935fc5e8405a637582b2faa3d69466d7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_nearest1d_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, c10::optional>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional> scale_factors);
+};
+
+struct TORCH_API upsample_nearest1d_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::optional, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, at::Tensor & out);
+};
+
+struct TORCH_API upsample_nearest1d {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::optional);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales);
+};
+
+}} // namespace at::_ops