diff --git a/ckpts/universal/global_step20/zero/22.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/22.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dc80b7c9dc690c4ad764254ea36051d6db3f3820
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab0a1e6350478bc859b581b379fb6ac6defe6591f1b1710f4eb7ab4715459d11
+size 16778411
diff --git a/ckpts/universal/global_step20/zero/23.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/23.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d94325dce8cb89b8cafec33b4f8f33a6bba88e50
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/23.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1a8c55e8688df9971f67f6faa8f1aff63e73a125c9ea017a9087a2de1399e26
+size 9293
diff --git a/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cce0c74f8753c73172bae7c60918d4504afbde7d
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fe684c38507a042a5ea38aa6be718855dc59ecf15d3bd819afe40c56711f62b
+size 50332828
diff --git a/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8cd4c53953226c3c81762f5634e70e20ebfd8e66
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:588c7374b127fda5eb2cc50d88c4523a07f1e33526b19b25dc840cc08fed40af
+size 50332843
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d7afaac43659404f572f92ba2d5c8fd19e5c4e0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..742268fe55815f22d111b7c7fa6685c6946cc071
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & _amp_update_scale_(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3e4ec67d49cade17d22cce4993b0b64b2c480022
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _cslt_sparse_mm(const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional<at::Tensor> & bias={}, const c10::optional<at::Tensor> & alpha={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, bool transpose_result=false, int64_t alg_id=0);
+
+} // namespace cuda
+} // namespace at
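The headers above are torchgen-generated, per-operator, per-dispatch-key declarations: namespaces such as `at::meta` and `at::cuda` mirror dispatch keys, and each file declares a single operator so translation units can avoid the monolithic `ATen/Functions.h`. A minimal sketch of how client code would consume such headers; the `demo` function is illustrative, and the claim that `AT_PER_OPERATOR_HEADERS` enforces this include discipline is an assumption about the build configuration, not something shown in this diff:

```cpp
// Sketch: include only the per-operator headers for the ops actually called,
// instead of the monolithic ATen/Functions.h (assumed to be enforced when the
// build defines AT_PER_OPERATOR_HEADERS).
#include <ATen/ops/ones.h> // declares at::ones only
#include <ATen/ops/abs.h>  // declares at::abs / at::abs_out only

at::Tensor demo() {
  at::Tensor t = at::ones({2, 3});
  return at::abs(t); // still resolved through the dispatcher as usual
}
```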
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c498537f98a1d734b7c6989526f42bae8b7daa15
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API int64_t _cufft_get_plan_cache_size(at::DeviceIndex device_index);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1dae58ad0ed17a1f1494a2e9e323bb28fbdac6ce
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cummin_helper {
+  using schema = void (const at::Tensor &, at::Tensor &, at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cummin_helper")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()")
+  static void call(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9b1bfcd1f037556aca59dd99dd8f7b236a859b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_ops.h
@@ -0,0 +1,116 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_sub_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & scalar);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_sub__Scalar {
+  using schema = void (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_sub_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_sub__List {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()")
+  static void call(at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_sub_ScalarList {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_sub__ScalarList {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_sub_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+};
+
+struct TORCH_API _foreach_sub_List_out {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+};
+
+struct TORCH_API _foreach_sub_ScalarList_out {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+};
+
+}} // namespace at::_ops
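The `_foreach_*` family applies one elementwise op across whole lists of tensors (optimizers use this to fuse per-parameter updates). A minimal sketch of the Scalar and List overloads declared above; tensor shapes and values are illustrative:

```cpp
#include <ATen/ATen.h>
#include <vector>

int main() {
  // Two lists of matching tensors, as an optimizer might hold per-parameter state.
  std::vector<at::Tensor> params = {at::ones({2, 2}), at::ones({3})};
  std::vector<at::Tensor> grads  = {at::full({2, 2}, 0.5), at::full({3}, 0.5)};

  // Functional `List` overload: params[i] - alpha * grads[i], returned as a new list.
  std::vector<at::Tensor> stepped = at::_foreach_sub(params, grads, /*alpha=*/0.1);

  // In-place `Scalar` overload: subtracts 1.0 from every tensor in the list.
  at::_foreach_sub_(params, 1.0);
  return 0;
}
```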
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..efa3ad8f74245ca61e60ad3ca9deea6e0e806849
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _indices {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_indices")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_indices(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9721681c5294a31a144db41d87cc35cf383016c2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
+
+} // namespace cuda
+} // namespace at
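`_indices` aliases the raw index buffer of a sparse COO tensor without the coalesced check that the public `indices()` accessor performs, which is why its schema annotates the return as `Tensor(a)`. A minimal sketch of the distinction; the tensor contents are illustrative:

```cpp
#include <ATen/ATen.h>

int main() {
  // 3x3 sparse COO tensor with non-zeros at (0,1) and (2,0).
  at::Tensor indices = at::tensor({0, 2, 1, 0}, at::kLong).reshape({2, 2});
  at::Tensor values  = at::tensor({10.0, 20.0});
  at::Tensor sp = at::sparse_coo_tensor(indices, values, {3, 3});

  at::Tensor raw  = sp._indices();            // raw buffer, no coalesced check
  at::Tensor safe = sp.coalesce().indices();  // public accessor requires coalesced
  return 0;
}
```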
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_jagged_dummy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_jagged_dummy.h
new file mode 100644
index 0000000000000000000000000000000000000000..19cfc057ad003636a5cc92ed128d851b02a4f105
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_jagged_dummy.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_get_jagged_dummy_ops.h>
+
+namespace at {
+
+
+// aten::_nested_get_jagged_dummy(Tensor any) -> Tensor
+inline at::Tensor _nested_get_jagged_dummy(const at::Tensor & any) {
+    return at::_ops::_nested_get_jagged_dummy::call(any);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c688354e116862c1cda17db7bbd88cca7df8272
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides.h
@@ -0,0 +1,34 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_tensor_strides_ops.h>
+
+namespace at {
+
+
+// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_tensor_strides_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::_nested_tensor_strides_out::call(self, out);
+}
+// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_tensor_strides_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_nested_tensor_strides_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0ed3a730a268d4f59b4142a98c45f17b5f16c4e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _nested_view_from_jagged_copy_out(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx, at::Tensor & out);
+TORCH_API at::Tensor _nested_view_from_jagged_copy(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths={}, int64_t ragged_idx=1);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..9253c827b2076e5ccc78cba6c45b235e89d0672b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h>
+
+namespace at {
+
+
+// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
+inline at::Tensor _new_zeros_with_same_feature_meta(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) {
+    return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims);
+}
+
+// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _new_zeros_with_same_feature_meta_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) {
+    return at::_ops::_new_zeros_with_same_feature_meta_out::call(self, other, self_num_batch_dims, out);
+}
+// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _new_zeros_with_same_feature_meta_outf(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
+    return at::_ops::_new_zeros_with_same_feature_meta_out::call(self, other, self_num_batch_dims, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_packed_sequence_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_packed_sequence_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..495fba1114622c368a7fb90939635a325279a08d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_packed_sequence_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _pad_packed_sequence {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, bool, const at::Scalar &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pad_packed_sequence")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc6082fd418b063092f34f949f42844f2e89e7e8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7001805a634d03ed31544b21830091a96f6acda5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+} // namespace native
+} // namespace at
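`_pdist_backward` is the gradient kernel behind the public `at::pdist`, which computes pairwise distances in the condensed form familiar from SciPy. A minimal sketch of the forward call it supports; shapes are illustrative:

```cpp
#include <ATen/ATen.h>

int main() {
  // 5 points in R^3; pdist returns the condensed distance vector of
  // length n*(n-1)/2 = 10 (upper triangle, row-major).
  at::Tensor x = at::rand({5, 3});
  at::Tensor d = at::pdist(x, /*p=*/2.0);
  return 0;
}
```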
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..83d8af2cfd5a0c40e4b0885c0751d99654082c1f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim);
+TORCH_API at::Tensor & _sparse_sum_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim);
+TORCH_API at::Tensor & _sparse_sum_outf(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..54392cb0917362ed0b884d065c6dd6cef7747815
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, c10::optional<int64_t> mask_type=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..11112ea1b7baef3405f6b4ffe7700844b3d62af6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _upsample_nearest_exact3d_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, c10::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API _upsample_nearest_exact3d_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+};
+
+struct TORCH_API _upsample_nearest_exact3d {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
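The `vec` overload accepts either an explicit output size or per-dimension scale factors, while the base overload takes an explicit `SymInt[3]` output size plus optional scales. A hedged sketch against the generated C++ wrapper; the input shape and target size are illustrative, and the exact overload resolution is an assumption about the generated `Functions.h` signatures:

```cpp
#include <ATen/ATen.h>

int main() {
  // NCDHW input: batch 1, 1 channel, 2x2x2 volume.
  at::Tensor x = at::rand({1, 1, 2, 2, 2});

  // Base overload: explicit output size, the three optional scales left unset.
  at::Tensor up = at::_upsample_nearest_exact3d(
      x, {4, 4, 4}, c10::nullopt, c10::nullopt, c10::nullopt);
  return 0;
}
```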
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c3d88bfd5c7fa7635864f7ee84fe287dac802c2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/abs_ops.h>
+
+namespace at {
+
+
+// aten::abs(Tensor self) -> Tensor
+inline at::Tensor abs(const at::Tensor & self) {
+    return at::_ops::abs::call(self);
+}
+
+// aten::abs_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & abs_(at::Tensor & self) {
+    return at::_ops::abs_::call(self);
+}
+
+// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::abs_out::call(self, out);
+}
+// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::abs_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e4fe376fbeeb5726e60e6d7fe1b1f750973af96
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
+
+namespace at {
+
+
+// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
+    return at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output, self, indices, grad_input);
+}
+// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
+    return at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output, self, indices, grad_input);
+}
+
+// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
+inline at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
+    return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae5d8effd2d13f48d51ae05d886343dbd8c0be02
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/addcmul_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_addcmul_out : public at::meta::structured_addcmul {
+void impl(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
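The `abs.h` wrappers show the generated out-variant naming convention: `*_out` puts the output tensor first for C++ ergonomics, while `*_outf` keeps the schema's trailing-out argument order; both forward to the same `_ops::abs_out::call`. A minimal sketch:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::tensor({-1.0, 2.0, -3.0});
  at::Tensor out = at::empty_like(x);

  at::abs_out(out, x);  // out-first variant
  at::abs_outf(x, out); // schema-order variant; same underlying op
  return 0;
}
```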
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats.h
new file mode 100644
index 0000000000000000000000000000000000000000..4518d07c42e2e0a74de087bcb6f390ceff4e2b40
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/batch_norm_stats_ops.h>
+
+namespace at {
+
+
+// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats(const at::Tensor & input, double eps) {
+    return at::_ops::batch_norm_stats::call(input, eps);
+}
+
+// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double eps) {
+    return at::_ops::batch_norm_stats_out::call(input, eps, out0, out1);
+}
+// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_outf(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::batch_norm_stats_out::call(input, eps, out0, out1);
+}
+
+}
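`batch_norm_stats` returns per-channel mean and inverse standard deviation (it backs SyncBatchNorm). A hedged sketch, assuming a CUDA build since the op is only registered for the CUDA dispatch key:

```cpp
#include <ATen/ATen.h>
#include <tuple>

int main() {
  // Guard the call: batch_norm_stats has no CPU kernel.
  if (at::hasCUDA()) {
    // NCHW input; stats are reduced over all dims except channel dim 1.
    at::Tensor x = at::rand({8, 4, 16, 16}, at::kCUDA);
    auto stats = at::batch_norm_stats(x, /*eps=*/1e-5);
    at::Tensor mean   = std::get<0>(stats); // shape [4]
    at::Tensor invstd = std::get<1>(stats); // shape [4]
  }
  return 0;
}
```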
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..369356cc4904c26af4994aa5b7089bf444c63fd0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API clamp_min {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min(Tensor self, Scalar min) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & min);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min);
+};
+
+struct TORCH_API clamp_min_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min.Tensor(Tensor self, Tensor min) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & min);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min);
+};
+
+struct TORCH_API clamp_min_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & min);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min);
+};
+
+struct TORCH_API clamp_min__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & min);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & min);
+};
+
+struct TORCH_API clamp_min_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & min, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min, at::Tensor & out);
+};
+
+struct TORCH_API clamp_min_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & min, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min, at::Tensor & out);
+};
+
+}} // namespace at::_ops
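`clamp_min` clamps every element to a lower bound, either a scalar or an elementwise tensor; with a scalar bound of zero it behaves like ReLU. A minimal sketch of the overloads declared above; values are illustrative:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::tensor({-2.0, 0.5, 3.0});

  at::Tensor y = at::clamp_min(x, 0.0); // ReLU-like: {0.0, 0.5, 3.0}
  at::clamp_min_(x, 0.0);               // in-place overload; x is now {0.0, 0.5, 3.0}

  // Tensor overload: per-element lower bound.
  at::Tensor lo = at::tensor({0.0, 1.0, 4.0});
  at::Tensor z = at::clamp_min(x, lo);  // {0.0, 1.0, 4.0}
  return 0;
}
```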
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f671adb1560fb7a82940619a8c94b88b05bf610
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cummaxmin_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummaxmin_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e30e0b28555fc414b50d0a330ecf744385d8ab8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_elu_backward : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result);
+};
+
+} // namespace native
+} // namespace at
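Structured kernels split each operator into a `meta()` that validates inputs and computes output shapes/dtypes and a per-device `impl()` that fills the outputs, so `structured_elu_backward` above declares only the shape-checking half. A sketch of the public entry point it ultimately serves; the scalar arguments shown are the usual ELU defaults, and the tensor values are illustrative:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::rand({4});
  at::Tensor grad_output = at::ones({4});

  // alpha=1, scale=1, input_scale=1, is_result=false:
  // gradient of elu evaluated at `self` (not at the forward result).
  at::Tensor grad_input = at::elu_backward(
      grad_output, /*alpha=*/1.0, /*scale=*/1.0, /*input_scale=*/1.0,
      /*is_result=*/false, self);
  return 0;
}
```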
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cdb010bf7fa381025a6b3a49c3bfabfff92e8960
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API embedding_sparse_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_sparse_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe10f78d34355d617c659ddce3acc4fb8377d0d1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+TORCH_API at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+TORCH_API at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1282e3cfbddbe76d8cd749edb64cb28ed8a0a336
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+TORCH_API at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+TORCH_API at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c3e4e887f84085d88771977931c95dcf9f5daf8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/gcd_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_gcd_out : public at::meta::structured_gcd {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
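`structured_gcd_out` again pairs a meta shape/dtype check with a device kernel; the public op computes the elementwise greatest common divisor of two integer tensors. A minimal sketch; values are illustrative:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::tensor({12, 18, 7});
  at::Tensor b = at::tensor({8, 24, 5});
  at::Tensor g = at::gcd(a, b); // {4, 6, 1}
  return 0;
}
```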
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..100902cef992de25d43b9c97379b417ca54bdb2b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API glu_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::glu_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input);
+};
+
+struct TORCH_API glu_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::glu_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..45da88a4c4d95b747e5615ad26c0ae7c26534cb1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5);
+TORCH_API at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5);
+TORCH_API at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
+inline at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardsigmoid_out::call(self, out); +} + +// aten::hardsigmoid(Tensor self) -> Tensor +inline at::Tensor hardsigmoid(const at::Tensor & self) { + return at::_ops::hardsigmoid::call(self); +} + +// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & hardsigmoid_(at::Tensor & self) { + return at::_ops::hardsigmoid_::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0815cd96efab939c37dd08122e79caa8f282ec1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a4f3e136ebe261b908fc48dd390af63e641f922a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
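The functional, `out`, `outf`, and in-place spellings declared for `hardsigmoid` above all route to the same `at::_ops` entry point; only the position of the result tensor differs. A minimal usage sketch (shapes illustrative):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({4});
  at::Tensor out = at::empty_like(x);

  at::Tensor y = at::hardsigmoid(x);  // functional: allocates the result
  at::hardsigmoid_out(out, x);        // out variant: result tensor first
  at::hardsigmoid_outf(x, out);       // same overload, result tensor last
  at::hardsigmoid_(x);                // in-place on x
  return 0;
}
```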
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f379585d85c7eaec55415268b0dd5ba7cbdfbf83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d9b4fdbae96838e24427e7b16eab9ecaec642d58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
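The `at::meta::i0` declarations above back the Meta dispatch key, which computes only output metadata (sizes, strides, dtype) without running the kernel. A sketch of exercising it through a Meta-device tensor:

```cpp
#include <ATen/ATen.h>

int main() {
  // A Meta tensor carries shape/dtype but no storage.
  at::Tensor t = at::empty({8}, at::TensorOptions().device(at::kMeta));
  at::Tensor r = at::i0(t);  // dispatches to the meta kernel: no data is touched
  // r has sizes {8}; useful for shape inference and tracing
  return 0;
}
```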
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices);
+TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices);
+TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..931beea0f6a43ca4bc0c21689d548e45024882e2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_index_Tensor : public TensorIteratorBase {
+
+  template <bool SIZES = false, bool STRIDES = false>
+  struct TORCH_API precompute_out {
+
+    precompute_out<true, STRIDES> set_sizes(at::DimVector value) {
+      static_assert(SIZES == false, "sizes already set");
+      precompute_out<true, STRIDES> ret;
+      ret.sizes = value;
+      ret.strides = this->strides;
+      return ret;
+    }
+
+    precompute_out<SIZES, true> set_strides(at::DimVector value) {
+      static_assert(STRIDES == false, "strides already set");
+      precompute_out<SIZES, true> ret;
+      ret.sizes = this->sizes;
+      ret.strides = value;
+      return ret;
+    }
+
+    at::DimVector sizes;
+    at::DimVector strides;
+  };
+  using meta_return_ty = precompute_out<true, true>;
+  meta_return_ty meta(const at::Tensor & self, at::IOptTensorListRef indices);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..97d19a2aed49393d57d11d14f60d5be91f139293
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API bool is_complex(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond.h
new file mode 100644
index 0000000000000000000000000000000000000000..e9b3618b7a3e2c1fa2e1b9df2b728789e397d5cd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linalg_cond_ops.h>
+
+namespace at {
+
+
+// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
+inline at::Tensor linalg_cond(const at::Tensor & self, const c10::optional<at::Scalar> & p=c10::nullopt) {
+    return at::_ops::linalg_cond::call(self, p);
+}
+
+// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p=c10::nullopt) {
+    return at::_ops::linalg_cond_out::call(self, p, out);
+}
+// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_cond_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {
+    return at::_ops::linalg_cond_out::call(self, p, out);
+}
+
+// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
+inline at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p) {
+    return at::_ops::linalg_cond_p_str::call(self, p);
+}
+
+// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p) {
+    return at::_ops::linalg_cond_p_str_out::call(self, p, out);
+}
+// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out) {
+    return at::_ops::linalg_cond_p_str_out::call(self, p, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e9b5bfc6b109269190fbd467e58a2079a55d5841
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
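`linalg_cond` exposes two overload families, one taking an optional `Scalar` order (default `None`, the 2-norm) and one taking a string order. A short sketch:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor A = at::randn({3, 3});
  at::Tensor c2 = at::linalg_cond(A);         // p=None -> 2-norm condition number
  at::Tensor c1 = at::linalg_cond(A, 1);      // Scalar overload: 1-norm
  at::Tensor cf = at::linalg_cond(A, "fro");  // p_str overload: Frobenius norm
  return 0;
}
```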
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..d95562e1ea8caff4d754688e41ebe1f71d484745 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_inv_ex : public at::impl::MetaBase { + + + void meta(const at::Tensor & A, bool check_errors); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eccb9154c1ecc07d439ef5f66d6e1fbc511c6631 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linear_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linear_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0bcde17f1418d703d9a7ec53a27a0bc0621d810c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linear_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
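`linalg_cross` defaults `dim` to -1 and requires both operands to have extent 3 along that dimension. A minimal sketch through the public wrappers:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::randn({5, 3});
  at::Tensor b = at::randn({5, 3});
  at::Tensor c = at::linalg_cross(a, b);        // cross product along dim=-1
  at::Tensor out = at::empty_like(a);
  at::linalg_cross_out(out, a, b, /*dim=*/-1);  // preallocated-output variant
  return 0;
}
```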
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linear {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linear")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias);
+};
+
+struct TORCH_API linear_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linear")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp.h
new file mode 100644
index 0000000000000000000000000000000000000000..75ab038c04098c9ed19ea7a54633f40b83e4d892
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/logsumexp_ops.h>
+
+namespace at {
+
+
+// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+inline at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+    return at::_ops::logsumexp::call(self, dim, keepdim);
+}
+
+// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+    return at::_ops::logsumexp_out::call(self, dim, keepdim, out);
+}
+// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+    return at::_ops::logsumexp_out::call(self, dim, keepdim, out);
+}
+
+// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
+inline at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false) {
+    return at::_ops::logsumexp_names::call(self, dim, keepdim);
+}
+
+// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false) {
+    return at::_ops::logsumexp_names_out::call(self, dim, keepdim, out);
+}
+// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
+    return at::_ops::logsumexp_names_out::call(self, dim, keepdim, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..940e3ca7dbfb15a15c4d6634111c33c06f367c92
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={});
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6cc1c8d66e8b0fdeedfc36317d10143199fa30b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
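`lstm_cell` above takes the previous hidden and cell state as a two-element `TensorList` and returns the `(hy, cy)` pair as a tuple. A sketch with illustrative sizes (batch 3, input 10, hidden 20):

```cpp
#include <ATen/ATen.h>

int main() {
  const int64_t B = 3, I = 10, H = 20;
  at::Tensor input = at::randn({B, I});
  at::Tensor hx = at::zeros({B, H}), cx = at::zeros({B, H});
  at::Tensor w_ih = at::randn({4 * H, I}), w_hh = at::randn({4 * H, H});
  at::Tensor b_ih = at::zeros({4 * H}), b_hh = at::zeros({4 * H});

  auto [hy, cy] = at::lstm_cell(input, {hx, cx}, w_ih, w_hh, b_ih, b_hh);
  return 0;  // hy and cy are both [B, H]
}
```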
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..0e163b0b4661f3d3b5518a4999fb305513b455b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} +// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} + +// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor +inline at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_convolution.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_convolution.h new file mode 100644 index 0000000000000000000000000000000000000000..4bc3e4cb350c27d18031fc250455a00ffcc38e7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_convolution.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::mkldnn_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::mkldnn_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups);
+  }
+}
+
+// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor mkldnn_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
+  }
+}
+
+// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::mkldnn_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & mkldnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::mkldnn_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+  }
+}
+
+// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+    return at::_ops::mkldnn_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & mkldnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+    return at::_ops::mkldnn_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+  }
+}
+
+// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::mkldnn_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & mkldnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::mkldnn_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
+  }
+}
+
+// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
+    return at::_ops::mkldnn_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & mkldnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
+    return at::_ops::mkldnn_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_input.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_input.h
new file mode 100644
index 0000000000000000000000000000000000000000..d214d8d2bd5f938105e9a46fed094b33d2a847bc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_input.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/mkldnn_linear_backward_input_ops.h>
+
+namespace at {
+
+
+// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
+inline at::Tensor mkldnn_linear_backward_input(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
+    return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight);
+}
+
+// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_linear_backward_input_out(at::Tensor & out, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
+    return at::_ops::mkldnn_linear_backward_input_out::call(input_size, grad_output, weight, out);
+}
+// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_linear_backward_input_outf(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {
+    return at::_ops::mkldnn_linear_backward_input_out::call(input_size, grad_output, weight, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f033e9e60d32229b3bf8799c4252e234d2f8528b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & mkldnn_max_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+TORCH_API at::Tensor mkldnn_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a669989ad60d4d2c15051e55f990027ac21c2842
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor multi_margin_loss_cpu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & multi_margin_loss_cpu_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+TORCH_API at::Tensor multi_margin_loss_cuda_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & multi_margin_loss_cuda_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0246bad2cf7bc1f828a3bb9af06d99b7ad97f31
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mvlgamma_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mvlgamma")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t p, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out);
+};
+
+struct TORCH_API mvlgamma {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mvlgamma")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mvlgamma(Tensor self, int p) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t p);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p);
+};
+
+struct TORCH_API mvlgamma_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mvlgamma_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t p);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t p);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5218e0f2a31f6bb178f98d074cb6f42795056b77
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
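Each `at::_ops` struct above (here `mvlgamma` and friends) records the registered name, overload name, and schema string, plus `call`/`redispatch` entry points; the public `at::mvlgamma` wrapper simply forwards to `call`. A sketch of the equivalence:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({4}) + 2.0;             // mvlgamma needs inputs > (p-1)/2
  at::Tensor a = at::mvlgamma(x, /*p=*/2);        // public wrapper
  at::Tensor b = at::_ops::mvlgamma::call(x, 2);  // same dispatcher entry point
  return 0;
}
```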
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor neg(const at::Tensor & self);
+TORCH_API at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & neg_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/output_nr_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/output_nr_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef6b2b84dd774012b664d1a180ee2b221753d4ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/output_nr_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t output_nr(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..681227315bc4a8953afcfc7dcbb527bef7c04b8a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_prod_dim_int : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7330c5fe351fb660425366c5b95c58cb1f9bae31
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
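The `at::cpu::neg` family above mirrors the usual variant convention; through the public API the same spellings look like this:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(4, at::kFloat);
  at::Tensor out = at::empty_like(t);
  at::Tensor y = at::neg(t);  // functional
  at::neg_out(out, t);        // writes -t into out
  at::neg_(t);                // in-place; t now matches out
  return 0;
}
```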
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator);
+TORCH_API at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator);
+TORCH_API at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9e05664557314917366f0a048f647dbd4be01652
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor randperm(int64_t n, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & randperm_out(int64_t n, at::Tensor & out);
+TORCH_API at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & randperm_out_cpu(int64_t n, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & randperm_out_cuda(int64_t n, c10::optional<at::Generator> generator, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..79d8909f413418dca188ec6089c4ad3221f0dc58
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
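The `rand_out` overloads above accept an optional generator; passing `c10::nullopt` falls back to the default generator. A short sketch, also touching `randperm`:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor out = at::empty({2, 3});
  at::rand_outf({2, 3}, c10::nullopt, out);  // fill with U(0,1), default generator
  at::Tensor perm = at::randperm(10);        // random permutation of 0..9, int64
  return 0;
}
```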
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +TORCH_API at::Tensor & reflection_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0b47e8ece13c8e74e6ad6b03e90700066f408887 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor reshape_as(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor reshape_as_nested(const at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..07e64d539f183dbb59ff3b8333fc0114043854e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
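The CUDA-key declarations above are what the dispatcher selects for CUDA inputs; the public `at::reflection_pad1d_backward_out` wrapper routes by device. A sketch with illustrative shapes:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::randn({1, 2, 8});
  at::Tensor padded = at::reflection_pad1d(self, {2, 2});  // -> [1, 2, 12]
  at::Tensor grad_out = at::ones_like(padded);
  at::Tensor grad_in = at::empty_like(self);
  at::reflection_pad1d_backward_out(grad_in, grad_out, self, {2, 2});
  return 0;
}
```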
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter.h
new file mode 100644
index 0000000000000000000000000000000000000000..e745cac7818056eec5ece0de0deed2d5e66a1fca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/scatter_ops.h>
+
+namespace at {
+
+
+// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
+    return at::_ops::scatter_src::call(self, dim, index, src);
+}
+
+// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
+    return at::_ops::scatter_src_out::call(self, dim, index, src, out);
+}
+// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
+    return at::_ops::scatter_src_out::call(self, dim, index, src, out);
+}
+
+// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+    return at::_ops::scatter_value::call(self, dim, index, value);
+}
+
+// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+    return at::_ops::scatter_value_out::call(self, dim, index, value, out);
+}
+// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
+    return at::_ops::scatter_value_out::call(self, dim, index, value, out);
+}
+
+// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
+    return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
+}
+
+// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + return at::_ops::scatter_reduce_out::call(self, dim, index, src, reduce, out); +} +// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) { + return at::_ops::scatter_reduce_out::call(self, dim, index, src, reduce, out); +} + +// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor +inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce); +} + +// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + return at::_ops::scatter_value_reduce_out::call(self, dim, index, value, reduce, out); +} +// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) { + return at::_ops::scatter_value_reduce_out::call(self, dim, index, value, reduce, out); +} + +// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor +inline at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_dimname_src::call(self, dim, index, src); +} + +// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter_dimname_value::call(self, dim, index, value); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4dba6685f9cd4f1a64bd32688981acd777631822 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
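The `scatter` overload families above differ only in whether the source is a tensor, a scalar, or combined with a string reduction. A minimal sketch:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::zeros({2, 4});
  at::Tensor index = at::arange(4, at::kLong).reshape({2, 2});
  at::Tensor src = at::ones({2, 2});

  at::Tensor a = at::scatter(self, /*dim=*/1, index, src);  // .src overload
  at::Tensor b = at::scatter(self, 1, index, 7.0);          // .value overload
  at::Tensor c = at::scatter(self, 1, index, src, "add");   // .reduce overload
  return 0;
}
```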
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & scatter_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & scatter_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & scatter_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..eb473fc0af8a4db4fb6cb00e040da014dc8a63cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API segment_reduce { + using schema = at::Tensor (const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, bool, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::segment_reduce") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor") + static at::Tensor call(const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial); +}; + +struct TORCH_API segment_reduce_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, bool, const c10::optional &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::segment_reduce") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) 
+  static at::Tensor & call(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign.h
new file mode 100644
index 0000000000000000000000000000000000000000..168409595a20648b94fe2d0d25d1d2d2dd4c1576
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/sign_ops.h>
+
+namespace at {
+
+
+// aten::sign(Tensor self) -> Tensor
+inline at::Tensor sign(const at::Tensor & self) {
+    return at::_ops::sign::call(self);
+}
+
+// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::sign_out::call(self, out);
+}
+// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::sign_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..693921ac35e2f1be7f9c139db63bd9b5238bf469
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/softshrink_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_softshrink_backward_out : public at::meta::structured_softshrink_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..72baaa7c7470f189b3dbf0fc731771d31991da1e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API softshrink_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softshrink_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+};
+
+struct TORCH_API softshrink_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softshrink_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..40d87258cf80a580f380f5c04f14772da66b1934
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_entr(const at::Tensor & self);
+TORCH_API at::Tensor & special_entr_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_entr_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf.h
new file mode 100644
index 0000000000000000000000000000000000000000..b552c239458e39db7fa95ae8c0a9fe35429b6645
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/special_erf_ops.h>
+
+namespace at {
+
+
+// aten::special_erf(Tensor self) -> Tensor
+inline at::Tensor special_erf(const at::Tensor & self) {
+    return at::_ops::special_erf::call(self);
+}
+
+// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_erf_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::special_erf_out::call(self, out);
+}
+// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_erf_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::special_erf_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c3a44d48df6fac3b67de2171910ced647f1a05c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f9895fccca145e8e1daa2f079f040cdc9300f9ec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a895095e9956a6ad9c8a215f21cc07a8fc98f31f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_modified_bessel_k1(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_psi_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_psi_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..83fa879fba4439bee5140d4f6f8092916022edbe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_psi_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor special_psi(const at::Tensor & self);
+TORCH_API at::Tensor & special_psi_out(const at::Tensor & self, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7502c1ab72e48d1091e440731887fec0abd1ebc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std.h
@@ -0,0 +1,86 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/std_ops.h>
+
+namespace at {
+
+
+// aten::std(Tensor self, bool unbiased=True) -> Tensor
+inline at::Tensor std(const at::Tensor & self, bool unbiased) {
+    return at::_ops::std::call(self, unbiased);
+}
+
+// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
+inline at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
+    return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
+}
+
+// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
+inline at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
+    return at::_ops::std_correction::call(self, dim, correction, keepdim);
+}
+
+// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
+    return at::_ops::std_out::call(self, dim, unbiased, keepdim, out);
+}
+// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
+    return at::_ops::std_out::call(self, dim, unbiased, keepdim, out);
+}
+
+// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
+    return at::_ops::std_correction_out::call(self, dim, correction, keepdim, out);
+}
+// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
+    return at::_ops::std_correction_out::call(self, dim, correction, keepdim, out);
+}
+
+// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+inline at::Tensor std(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
+    return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
+}
+
+// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
+    return at::_ops::std_names_out::call(self, dim, unbiased, keepdim, out);
+}
+// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
+    return at::_ops::std_names_out::call(self, dim, unbiased, keepdim, out);
+}
+
+// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
+inline at::Tensor std(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
+    return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
+}
+
+// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
+    return at::_ops::std_correction_names_out::call(self, dim, correction, keepdim, out);
+}
+// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
+    return at::_ops::std_correction_names_out::call(self, dim, correction, keepdim, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unflatten.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unflatten.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4f35784d668ed762c181130987b8b806d723461
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unflatten.h
@@ -0,0 +1,69 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/unflatten_ops.h>
+
+namespace at {
+
+
+// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)
+inline at::Tensor unflatten(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
+    return at::_ops::unflatten_int::call(self, dim, c10::fromIntArrayRefSlow(sizes));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor unflatten(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
+    return at::_ops::unflatten_int::call(self, dim, c10::fromIntArrayRefSlow(sizes));
+  }
+}
+
+// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)
+inline at::Tensor unflatten_symint(const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) {
+    return at::_ops::unflatten_int::call(self, dim, sizes);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor unflatten(const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) {
+    return at::_ops::unflatten_int::call(self, dim, sizes);
+  }
+}
+
+// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)
+inline at::Tensor unflatten(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
+    return at::_ops::unflatten_Dimname::call(self, dim, c10::fromIntArrayRefSlow(sizes), names);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor unflatten(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
+    return at::_ops::unflatten_Dimname::call(self, dim, c10::fromIntArrayRefSlow(sizes), names);
+  }
+}
+
+// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)
+inline at::Tensor unflatten_symint(const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) {
+    return at::_ops::unflatten_Dimname::call(self, dim, sizes, names);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor unflatten(const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) {
+    return at::_ops::unflatten_Dimname::call(self, dim, sizes, names);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/values_copy_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/values_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1adfee09a3c2e7d76caca99f1f327ebe2c8140a0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/values_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API values_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::values_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "values_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API values_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::values_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
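The vendored headers above all instantiate one torchgen pattern: each operator gets a functional wrapper that allocates its result (at::sign, at::std, at::unflatten), an *_out overload that takes the destination tensor first, and an *_outf overload that takes it last, with every variant forwarding to the static call() of the matching at::_ops::* struct. A minimal usage sketch of that pattern, assuming a translation unit compiled and linked against libtorch; the shapes and arguments are illustrative only and do not come from this diff:

#include <ATen/ATen.h>  // umbrella header; transitively includes per-op headers like those above

int main() {
  at::Tensor x = at::randn({4, 6});

  // Functional variant: allocates and returns a fresh result tensor.
  at::Tensor sg = at::sign(x);

  // aten::std.correction: pass correction as c10::optional<at::Scalar> so overload
  // resolution picks the correction overload rather than the legacy bool-unbiased one.
  at::Tensor sd = at::std(x, /*dim=*/{1}, c10::optional<at::Scalar>(1), /*keepdim=*/false);

  // *_out variant: writes into a caller-provided tensor, destination first.
  at::Tensor out = at::empty({4}, x.options());
  at::std_out(out, x, /*dim=*/{1}, c10::optional<at::Scalar>(1), /*keepdim=*/false);

  // aten::unflatten.int: view dim 1 of the 4x6 tensor as 2x3, giving a 4x2x3 view.
  at::Tensor u = at::unflatten(x, /*dim=*/1, /*sizes=*/{2, 3});

  return 0;
}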