diff --git a/ckpts/universal/global_step120/zero/16.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/16.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3ff15f251b97d2a950bcd51caea4ad8882e655ec
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/16.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8ac1ae4d8ee6befe4171f120ba7ecf1904fbe3c50f1b6c29a1d4b0bbdef69c4
+size 50332843
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ecaf86eabb9f71f018ff84639425431ad905566e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API void _amp_foreach_non_finite_check_and_unscale_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5cabe749e9b3fba9ace0fdd1e67408049559d60
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _conj {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_conj")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_conj(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b1c9986924edb51f038598bdfebb6422f3e909bd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _efficient_attention_forward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, c10::optional<int64_t>, c10::optional<int64_t>, double, int64_t, bool, c10::optional<double>, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_efficient_attention_forward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, int? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? causal_diagonal=None, Tensor? seqlen_k=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, c10::optional<int64_t> max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp, c10::optional<double> scale, const c10::optional<at::Tensor> & causal_diagonal, const c10::optional<at::Tensor> & seqlen_k);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, c10::optional<int64_t> max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp, c10::optional<double> scale, const c10::optional<at::Tensor> & causal_diagonal, const c10::optional<at::Tensor> & seqlen_k);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0cfc049c5c614697a46b3db0eefd648967a84eb4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _embedding_bag {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional<at::Tensor> &, bool, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx);
+};
+
+struct TORCH_API _embedding_bag_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional<at::Tensor> &, bool, int64_t, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a39763975e6672474b93d246f5007a9527cc819
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward);
+TORCH_API at::Tensor _fft_c2c_symint(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward);
+TORCH_API at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward);
+TORCH_API at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out);
+TORCH_API at::Tensor & _fft_c2c_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward);
+TORCH_API at::Tensor & _fft_c2c_symint_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min.h
new file mode 100644
index 0000000000000000000000000000000000000000..883754a68fd9105cfea5fca164e6ece430bc4cf5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min.h
@@ -0,0 +1,82 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_clamp_min_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_clamp_min_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_min_List::call(self, other);
+}
+
+// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+inline void _foreach_clamp_min_(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_min__List::call(self, other);
+}
+
+// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_clamp_min_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
+}
+// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
+    return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
+}
+
+// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cosh_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cosh_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed494719f4fbbe1cf69ea634f42b6e9d7503cdb7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cosh_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_cosh_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_cosh_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..31dc0c795e312675dbce4848f951672e96abd0c3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_log1p {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log1p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log1p(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_log1p_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log1p_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log1p_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_log1p_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log1p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh.h
new file mode 100644
index 0000000000000000000000000000000000000000..67272e5ac6dc7f3c31b60826fe3a5d622c2d346b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_sinh_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_sinh(at::TensorList self) {
+    return at::_ops::_foreach_sinh::call(self);
+}
+
+// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
+inline void _foreach_sinh_(at::TensorList self) {
+    return at::_ops::_foreach_sinh_::call(self);
+}
+
+// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sinh_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_sinh_out::call(self, out);
+}
+// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sinh_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_sinh_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tan_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tan_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f0ac47d9763ba3b3979615fc175b3c473466fc5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tan_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_tan {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tan")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tan(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_tan_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tan_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tan_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_tan_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tan")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b7c3dcc1b2ea7c3d73304b46ecd9357903b629b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _histogramdd_from_bin_cts {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional<c10::ArrayRef<double>>, const c10::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_histogramdd_from_bin_cts")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef bins, c10::optional<c10::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<c10::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density);
+};
+
+struct TORCH_API _histogramdd_from_bin_cts_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional<c10::ArrayRef<double>>, const c10::optional<at::Tensor> &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_histogramdd_from_bin_cts")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef bins, c10::optional<c10::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<c10::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc4a172b23e4e9121b631a6bde1e62101913d55c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _histogramdd_from_bin_tensors(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight={}, bool density=false);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e875b66f19e4c5168a60cba8960aa84e890566c3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..798a338c61a818a572afff1148d2c83a013adb42
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _mps_convolution_transpose {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mps_convolution_transpose")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups);
+};
+
+struct TORCH_API _mps_convolution_transpose_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mps_convolution_transpose")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbbdcbd019121541d352e98b4936e0099c9dad54
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check=true);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_left_aligned_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_left_aligned_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c37474b69c19d3a4c1c5fc90edaa163bfbaa9bfb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_left_aligned_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_tensor_from_mask_left_aligned {
+  using schema = bool (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_from_mask_left_aligned")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool")
+  static bool call(const at::Tensor & t, const at::Tensor & mask);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a555ded0dcf54265a44cccb0f528905237fd4caa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _unique(const at::Tensor & self, bool sorted=true, bool return_inverse=false);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unpack_dual_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unpack_dual_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd35e468215e6bfadbcb2398e866c0ed0f1f85e5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unpack_dual_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _unpack_dual {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unpack_dual")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & dual, int64_t level);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dual, int64_t level);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cartesian_prod_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cartesian_prod_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..85d312cac632f4e20b3d6349ca6a27bdd6d3cba3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cartesian_prod_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cartesian_prod {
+  using schema = at::Tensor (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cartesian_prod")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cartesian_prod(Tensor[] tensors) -> Tensor")
+  static at::Tensor call(at::TensorList tensors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h
new file mode 100644
index 0000000000000000000000000000000000000000..5fd2b33ad3bec2dd159277ef6f299c2ad0a1a761
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/constant_pad_nd_ops.h>
+
+namespace at {
+
+
+// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
+inline at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
+    return at::_ops::constant_pad_nd::call(self, c10::fromIntArrayRefSlow(pad), value);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
+    return at::_ops::constant_pad_nd::call(self, c10::fromIntArrayRefSlow(pad), value);
+  }
+}
+
+// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
+inline at::Tensor constant_pad_nd_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) {
+    return at::_ops::constant_pad_nd::call(self, pad, value);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor constant_pad_nd(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) {
+    return at::_ops::constant_pad_nd::call(self, pad, value);
+  }
+}
+
+// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
+    return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
+    return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out);
+  }
+}
+
+// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
+    return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
+    return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out);
+  }
+}
+
+// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & constant_pad_nd_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) {
+    return at::_ops::constant_pad_nd_out::call(self, pad, value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) {
+    return at::_ops::constant_pad_nd_out::call(self, pad, value, out);
+  }
+}
+
+// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & constant_pad_nd_symint_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
+    return at::_ops::constant_pad_nd_out::call(self, pad, value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & constant_pad_nd_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
+    return at::_ops::constant_pad_nd_out::call(self, pad, value, out);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cded0a4df325fa85f4f1e531457969b68ebee5ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6012f35b5ac72fe52b20a898be5d435238b7a73f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor conv_transpose3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1));
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..15aed586fcbd8697086bb721ffa7f88b8d41f560
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_out(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out);
+TORCH_API at::Tensor cudnn_affine_grid_generator_backward(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4babdc2d74c043bb677e7945108ffafbd2f62a0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor cudnn_convolution(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
+TORCH_API at::Tensor & cudnn_convolution_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b522404cf7975c19a5f2cbdb1ff504a5dd83b43
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, at::Dimname dim);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab894a7ecc6ba1f8c53866ce76c29cba5adb9dda
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_cumprod : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..9a49dae6f7f78e508c42b9824903be44e5cd4071
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/diagonal_backward_ops.h>
+
+namespace at {
+
+
+// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+inline at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
+  }
+}
+
+// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+inline at::Tensor diagonal_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor diagonal_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..28f44748322e35b6acda968436052d9264d70161
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & embedding_dense_backward_out_symint(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out);
+TORCH_API at::Tensor embedding_dense_backward_cpu(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+TORCH_API at::Tensor embedding_dense_backward_cuda(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..50596d03fe06e591ed571a513c02821724a16b2f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fake_quantize_per_channel_affine {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fake_quantize_per_channel_affine")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..64b485b5b26b3301ad5eb4f7d1dc7c4a63809ea8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fake_quantize_per_tensor_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ee6e61c5f8d5dfa8cfb69d4f78261caf9e0f205
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b8dd4e8a5906ce0be42b0b49045176c038b60ff
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & fill_(at::Tensor & self, const at::Scalar & value);
+TORCH_API at::Tensor & fill_(at::Tensor & self, const at::Tensor & value);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flipud_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flipud_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c791e5971c54d1d17a39872f541ec8a3d525730
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flipud_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor flipud(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..28541d74214abc405ba8655c6a0c6f16ac0b3df4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor floor(const at::Tensor & self);
+TORCH_API at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & floor_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..eda5c345f10a9c5bb46a14776d6dd40805928803
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor fmin(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
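For reference, the floor and fmin declarations above are the per-backend entry points behind the public at:: API; a minimal usage sketch, assuming a standard libtorch build (illustrative only, not part of the vendored headers):

#include <ATen/ATen.h>

void floor_fmin_demo() {
  at::Tensor x = at::tensor({1.7f, -0.3f, 2.5f});
  at::Tensor y = at::floor(x);    // elementwise floor -> {1, -1, 2}
  at::Tensor out = at::empty_like(x);
  at::floor_out(out, x);          // out= overload writes into preallocated storage
  at::Tensor z = at::fmin(x, y);  // elementwise minimum, NaN-aware like std::fmin
}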
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..24d179e058031f4970cb95a7ed71fdebaf01e6a4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+TORCH_API at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+TORCH_API at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2481afb3a9c3d918e90d4adb6325664986afbf58
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+TORCH_API at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+TORCH_API at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a6f18f26a9b422ba359fdbf1dbb9e6acfdb5e35
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1f87bc0f0a5b07c21e4a312fc00f1880f6b344e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & grid_sampler_2d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+TORCH_API at::Tensor & grid_sampler_2d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4da0b335eb8c166ae48ab8a60214370b16ef1d19
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor histogram_histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0);
+TORCH_API at::Tensor & histogram_histc_out(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out);
+TORCH_API at::Tensor _histc_cuda(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0);
+TORCH_API at::Tensor & _histc_out_cuda(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out);
+} // namespace native
+} // namespace at
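The geqrf and histc entry points above surface LAPACK-style QR factorization and histogram bucketing; a minimal sketch of how they are typically called, assuming a standard libtorch build (illustrative only):

#include <ATen/ATen.h>

void geqrf_histc_demo() {
  at::Tensor m = at::randn({4, 3});
  // geqrf returns the packed Householder factorization (a, tau),
  // matching the ::std::tuple<at::Tensor,at::Tensor> declaration above.
  auto [a, tau] = at::geqrf(m);
  // histc buckets values into equal-width bins over [min, max]:
  at::Tensor h = at::histc(at::randn({100}), /*bins=*/10, /*min=*/-3, /*max=*/3);
}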
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c7b8206515af1375770ce99df6ac17821cc3dd7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API bool is_same_size(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a435da3a8c86e02487c8a21407fe6688c63a7b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_eigvals(const at::Tensor & self);
+TORCH_API at::Tensor & linalg_eigvals_out(const at::Tensor & self, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p.h
new file mode 100644
index 0000000000000000000000000000000000000000..f596630ed82b0d07ab90fe630d693bb8f7d2b2ed
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/log1p_ops.h>
+
+namespace at {
+
+
+// aten::log1p(Tensor self) -> Tensor
+inline at::Tensor log1p(const at::Tensor & self) {
+    return at::_ops::log1p::call(self);
+}
+
+// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & log1p_(at::Tensor & self) {
+    return at::_ops::log1p_::call(self);
+}
+
+// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::log1p_out::call(self, out);
+}
+// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::log1p_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..8e558f01b6b6beede7a93d06d8b6674fc74d9cdd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_log1p : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..666f2ea3f067496a58a7d0c5c7bdc9ad6f7432d8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/log1p_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_log1p_out : public at::meta::structured_log1p {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor log1p_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & log1p_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & log1p_sparse_(at::Tensor & self);
+TORCH_API at::Tensor log1p_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & log1p_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & log1p_sparse_csr_(at::Tensor & self);
+} // namespace native
+} // namespace at
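The log1p family above (functional, in-place, and out= forms) maps onto the public API as in this minimal sketch, assuming a standard libtorch build (illustrative only):

#include <ATen/ATen.h>

void log1p_demo() {
  at::Tensor x = at::rand({3});
  at::Tensor y = at::log1p(x);    // numerically stable log(1 + x) for small x
  at::Tensor out = at::empty_like(x);
  at::log1p_out(out, x);          // out= overload declared above
  x.log1p_();                     // in-place variant
}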
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0da0108e932711216fc2381fb6433058ba00dc37
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..25a965a4908cb63d2e4ca26154331a09505c12a6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor minimum(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & minimum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & minimum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c909fb5fce05b8bb511e13be7bc1a1a218b734b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/nll_loss2d_backward_ops.h>
+
+namespace at {
+
+
+// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
+    return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
+    return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+  }
+}
+
+// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
+    return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
+    return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+  }
+}
+
+// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & nll_loss2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
+    return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
+    return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+  }
+}
+
+// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & nll_loss2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
+    return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
+    return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+  }
+}
+
+// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
+inline at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
+    return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
+    return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
+  }
+}
+
+// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
+inline at::Tensor nll_loss2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
+    return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
+    return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2df7534f56de68cd8c16175e64ac59f24543e257
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor nll_loss_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100);
+TORCH_API at::Tensor & nll_loss_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_nd_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_nd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..51220ec70e4ec410acba892d04fdaf1917687511
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_nd_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor nll_loss_nd_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..24d7d2dfa70d45871f7d1c57c40aac607783bb6d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor nonzero(const at::Tensor & self);
+TORCH_API at::Tensor & nonzero_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & nonzero_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ones_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ones_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d6ef4827f675afdc5191186e4ba20e2b8d15c3b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ones_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & ones_names_out(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out);
+TORCH_API at::Tensor ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & ones_out(at::IntArrayRef size, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_lstm_cell_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_lstm_cell_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b098ebbf07e4f3909ebef2a2284e0512d65a1f67
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_lstm_cell_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+} // namespace native
+} // namespace at
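The two native ones overloads above (with and without dimension names) back the usual factory calls; a minimal sketch, assuming a standard libtorch build (illustrative only):

#include <ATen/ATen.h>

void ones_demo() {
  at::Tensor a = at::ones({2, 3});             // default dtype (float)
  at::Tensor b = at::ones({2, 3}, at::kLong);  // explicit dtype
  at::Tensor out = at::empty({2, 3});
  at::ones_out(out, {2, 3});                   // out= form declared above
}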
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7687fbaf1e956e4f25b736815e555d7e2fb7ed7a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/reciprocal_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_reciprocal_out : public at::meta::structured_reciprocal {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8004a5c8f5fc31c3ac59810cb1c5527bfbd00168
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+TORCH_API at::Tensor & reflection_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..901b0fac9bfbcde7ec6109ead227c0f331f60d23
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API reshape_as {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reshape_as")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/retains_grad_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/retains_grad_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..410b97659c5a0055b8cd100be07b1192180b4ece
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/retains_grad_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool retains_grad(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..01033edb6704e7cd9f0cb68230a16a72dd440da9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & segment_reduce_out(at::Tensor & out, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & indices={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional<at::Scalar> & initial=c10::nullopt);
+TORCH_API at::Tensor & segment_reduce_outf(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d3dfa23d14597b21155b15f3f962addd085d9df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_bessel_y1(const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c380004c13e6ab26e01caea07ffd2214528af014
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f7829c9f5b21dfc7c86a94872ceb3592edf4ccdb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_i1e(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8de20df0dada36439e2db079c2f60ed0e946df8c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor special_logit(const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
+TORCH_API at::Tensor & special_logit_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
+TORCH_API at::Tensor & special_logit_outf(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b4087098a823476de3503975f139f50c378f83d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
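The special_logit declarations above expose logit(p) = log(p / (1 - p)) with an optional clamping epsilon; a minimal sketch, assuming a standard libtorch build (illustrative only):

#include <ATen/ATen.h>

void special_logit_demo() {
  at::Tensor p = at::rand({4});                   // values in [0, 1)
  at::Tensor l = at::special_logit(p);            // log(p / (1 - p))
  at::Tensor l_eps = at::special_logit(p, 1e-6);  // clamp p to [eps, 1 - eps] first
}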
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c90f200015b0349cddbff8bd15ad064719be9c47
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & sum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_stride_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_stride_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b99f10a97326177f3bdcf0cee807d97491f1845f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_stride_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API c10::SymInt sym_stride(const at::Tensor & self, int64_t dim);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbd0e4502aa39ac5c9f4c5ae6658c01610973f2e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/tanh_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_tanh_backward_out : public at::meta::structured_tanh_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbe39dda0b935f51311a9aaa270fdcfaf6c1465d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split.h
@@ -0,0 +1,74 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/tensor_split_ops.h>
+
+namespace at {
+
+
+// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, int64_t sections, int64_t dim=0) {
+    return at::_ops::tensor_split_sections::call(self, sections, dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, int64_t sections, int64_t dim=0) {
+    return at::_ops::tensor_split_sections::call(self, sections, dim);
+  }
+}
+
+// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> tensor_split_symint(const at::Tensor & self, c10::SymInt sections, int64_t dim=0) {
+    return at::_ops::tensor_split_sections::call(self, sections, dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, c10::SymInt sections, int64_t dim=0) {
+    return at::_ops::tensor_split_sections::call(self, sections, dim);
+  }
+}
+
+// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, at::IntArrayRef indices, int64_t dim=0) {
+    return at::_ops::tensor_split_indices::call(self, c10::fromIntArrayRefSlow(indices), dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, at::IntArrayRef indices, int64_t dim=0) {
+    return at::_ops::tensor_split_indices::call(self, c10::fromIntArrayRefSlow(indices), dim);
+  }
+}
+
+// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> tensor_split_symint(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim=0) {
+    return at::_ops::tensor_split_indices::call(self, indices, dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim=0) {
+    return at::_ops::tensor_split_indices::call(self, indices, dim);
+  }
+}
+
+// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim=0) {
+    return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..81d46f23cfd8c687436760df35e770de0dd4ebc5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> tensor_split_sections_symint(const at::Tensor & self, c10::SymInt sections, int64_t dim=0);
+TORCH_API ::std::vector<at::Tensor> tensor_split_indices_symint(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim=0);
+TORCH_API ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim=0);
+} // namespace native
+} // namespace at
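tensor_split, declared above in by-sections, by-indices, and by-tensor forms, accepts uneven sizes rather than erroring; a minimal sketch, assuming a standard libtorch build (illustrative only):

#include <ATen/ATen.h>

void tensor_split_demo() {
  at::Tensor t = at::arange(8);
  // Three near-equal sections along dim 0 -> sizes {3, 3, 2}:
  ::std::vector<at::Tensor> parts = at::tensor_split(t, 3, /*dim=*/0);
  // Split before explicit indices instead -> sizes {2, 3, 3}:
  ::std::vector<at::Tensor> chunks = at::tensor_split(t, at::IntArrayRef{2, 5}, /*dim=*/0);
}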
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d.h
new file mode 100644
index 0000000000000000000000000000000000000000..9489be6cfc6c8ca10c86ac8b2f54f152c6e455d6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/thnn_conv2d_ops.h>
+
+namespace at {
+
+
+// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out);
+  }
+}
+
+// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out);
+  }
+}
+
+// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & thnn_conv2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, kernel_size, bias, stride, padding, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, kernel_size, bias, stride, padding, out);
+  }
+}
+
+// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & thnn_conv2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, kernel_size, bias, stride, padding, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, kernel_size, bias, stride, padding, out);
+  }
+}
+
+// aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor
+inline at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
+    return at::_ops::thnn_conv2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
+    return at::_ops::thnn_conv2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding));
+  }
+}
+
+// aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor
+inline at::Tensor thnn_conv2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) {
+    return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) {
+    return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
+  }
+}
+
+}
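thnn_conv2d above is the slow reference 2-D convolution (at::conv2d normally routes to faster backends); a minimal sketch of the functional form, assuming a standard libtorch build (illustrative only):

#include <ATen/ATen.h>

void thnn_conv2d_demo() {
  at::Tensor input  = at::randn({1, 3, 8, 8});   // NCHW
  at::Tensor weight = at::randn({6, 3, 3, 3});   // out_channels, in_channels, kH, kW
  at::Tensor bias   = at::randn({6});
  at::Tensor out = at::thnn_conv2d(input, weight, /*kernel_size=*/{3, 3},
                                   bias, /*stride=*/{1, 1}, /*padding=*/{0, 0});
  // out has shape {1, 6, 6, 6}
}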
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4dfe5056e4d41771e59fa82ad47cdd68456127f1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1);
+TORCH_API at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
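The two trapz overloads registered above integrate y with the trapezoidal rule, either against explicit sample points x or a uniform spacing dx. A small worked sketch, with values chosen to be easy to check by hand (the helper name is an assumption):

#include <ATen/ATen.h>

void trapz_example() {
  at::Tensor y = at::tensor({0.0, 1.0, 2.0});
  // Uniform spacing: (0+1)/2 + (1+2)/2 = 2.0
  at::Tensor a = at::trapz(y, /*dx=*/1.0);
  // Explicit, non-uniform sample points: 2*(0.5) + 2*(1.5) = 4.0
  at::Tensor x = at::tensor({0.0, 2.0, 4.0});
  at::Tensor b = at::trapz(y, x);
}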
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu.h
new file mode 100644
index 0000000000000000000000000000000000000000..4bbcb5bd49a98a120a3fceb339717a2d0c916ddb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/triu_ops.h>
+
+namespace at {
+
+
+// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & triu_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) {
+    return at::_ops::triu_out::call(self, diagonal, out);
+}
+// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & triu_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
+    return at::_ops::triu_out::call(self, diagonal, out);
+}
+
+// aten::triu(Tensor self, int diagonal=0) -> Tensor
+inline at::Tensor triu(const at::Tensor & self, int64_t diagonal=0) {
+    return at::_ops::triu::call(self, diagonal);
+}
+
+}
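A short usage sketch for the triu wrappers above; the out/outf pair differ only in argument order (out-first with defaults vs. out-last, fully explicit). Helper name and shapes are illustrative assumptions:

#include <ATen/ATen.h>

void triu_example() {
  at::Tensor m = at::ones({3, 3});
  at::Tensor u0 = at::triu(m);                  // keeps main diagonal and above
  at::Tensor u1 = at::triu(m, /*diagonal=*/1);  // strictly above the diagonal
  at::Tensor out = at::empty_like(m);
  at::triu_out(out, m);                         // out variant writes into `out`
}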
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..02176e9efc9f875db960262dbe970f99f3a112b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API unsqueeze {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unsqueeze")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unsqueeze(Tensor(a) self, int dim) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim);
+};
+
+struct TORCH_API unsqueeze_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unsqueeze_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t dim);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac38603e7604e9f7aceee8e46629f874d5261fbd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/view_as_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b21edfc305bb42905aeb9b68bb7cbeaa74fd7df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor view_as_real(const at::Tensor & self);
+
+} // namespace meta
+} // namespace at
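Finally, a sketch tying the remaining headers together: unsqueeze/unsqueeze_ are the view ops declared in unsqueeze_ops.h, and view_as_real reinterprets a complex tensor as a real one with a trailing size-2 dimension. Helper name and shapes are illustrative assumptions:

#include <ATen/ATen.h>

void view_example() {
  at::Tensor t = at::randn({4});
  at::Tensor col = at::unsqueeze(t, /*dim=*/1);  // [4, 1] view, shares storage
  t.unsqueeze_(0);                               // in-place variant; t is now [1, 4]
  at::Tensor c = at::randn({2}, at::kComplexFloat);
  at::Tensor r = at::view_as_real(c);            // [2, 2]: (real, imag) pairs
}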