diff --git a/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..21779d5ed83e03666c56fe466b328fdac83b9289
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec7679370270676bc4023adb10348de7a992ad2e9006b6f2ba7c947fb296fa42
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..946fcb4255feba8afa148efadaa2fe8de48aa4cc
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07dcb14bcfd5ba5d7d8ff091c38f9fefd52c8bac970509909a49ef27cdef318f
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e2f59ee036abe6947e874745f2b2dbbd02cbbb3a
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61e308f88ff75f6b02dd88b48e2cb4591d36d6d33968bea182b78e329a0a8a0f
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/20.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/20.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..84357d6048ab7f0e90a3044e2cc1ac2ff7577fa2
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/20.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0476988ffe18238e4c486cdc0ea848595e45dd9d036e6d814d44d6384823da82
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/26.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/26.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..edaf4975b345912334ac32b99485f696a73d00ff
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:477b568d996f9bd317a67b5d99f1710b98b4c75d4dcd4a82242dc9c73596d153
+size 16778317
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h
new file mode 100644
index 0000000000000000000000000000000000000000..b35c9657527d84c022ffeea845b9b37fdd17ee70
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include <ATen/ATen.h>
+#include <ATen/Tensor.h>
+#include <ATen/dlpack.h>
+
+// this convertor will:
+// 1) take a Tensor object and wrap it in the DLPack tensor
+// 2) take a dlpack tensor and convert it to the ATen Tensor
+
+namespace at {
+
+TORCH_API ScalarType toScalarType(const DLDataType& dtype);
+TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
+TORCH_API Tensor fromDLPack(DLManagedTensor* src);
+C10_DEPRECATED_MESSAGE("Please migrate to a non-const variant")
+inline Tensor fromDLPack(const DLManagedTensor* src) {
+  return fromDLPack(const_cast<DLManagedTensor*>(src));
+}
+TORCH_API Tensor
+fromDLPack(DLManagedTensor* src, std::function<void(void*)> deleter);
+TORCH_API DLDataType getDLDataType(const Tensor& t);
+TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& device_id);
+
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h
new file mode 100644
index 0000000000000000000000000000000000000000..042cd92da19345d7523671ca75da7279d13062a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <ATen/core/ivalue.h>
+#include <c10/util/intrusive_ptr.h>
+#include <functional>
+
+namespace at {
+
+// Launches intra-op parallel task, returns a future
+TORCH_API c10::intrusive_ptr<c10::ivalue::Future> intraop_launch_future(
+    std::function<void()> func);
+
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b0bacdad27ef87e0fe1075ba30d097d7ccc3209
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _conj(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b607e83c528a8f5eada4b0aa043a6674866f75b1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _conj(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..09b58daaa69778d2fa52adfb49927372a99995d9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__convert_indices_from_csr_to_coo : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose);
+};
+
+} // namespace native
+} // namespace at
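The two DLConvertor.h entry points above convert between at::Tensor and the DLPack DLManagedTensor without copying. A minimal round-trip sketch (the main() harness is illustrative, not part of the diff):

#include <ATen/ATen.h>
#include <ATen/DLConvertor.h>

int main() {
  at::Tensor t = at::rand({2, 3});
  // Wrap the tensor in a DLPack struct; storage is shared, not copied.
  DLManagedTensor* dl = at::toDLPack(t);
  // Re-import it; fromDLPack takes ownership and runs dl->deleter when
  // the returned tensor is destroyed.
  at::Tensor back = at::fromDLPack(dl);
  return back.equal(t) ? 0 : 1;
}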
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6a077e3d2fe7708597ea18aa49857c130467037
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cummin_helper_ops.h>
+
+namespace at {
+
+
+// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
+inline void _cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
+    return at::_ops::_cummin_helper::call(self, values, indices, dim);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ebe8141ba94fef859465f64ccad97d64f176da25
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API void _cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim);
+
+} // namespace cpu
+} // namespace at
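_cummin_helper writes the running minimum and its indices into preallocated outputs, which is why values and indices are mutable references in the schema above. A hedged sketch of calling it directly (normally at::cummin is the public entry point):

#include <ATen/ATen.h>

void cummin_into(const at::Tensor& self, int64_t dim) {
  // Outputs must already have self's shape; the helper fills them in place.
  at::Tensor values = at::empty_like(self);
  at::Tensor indices = at::empty_like(self, self.options().dtype(at::kLong));
  at::_cummin_helper(self, values, indices, dim);
}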
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..37dae0fd294f21557113fe2e8271c1a7d3ac77c7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, c10::optional<int64_t> max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, c10::optional<double> scale=c10::nullopt, const c10::optional<at::Tensor> & causal_diagonal={}, const c10::optional<at::Tensor> & seqlen_k={});
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward.h
new file mode 100644
index 0000000000000000000000000000000000000000..129daca273893d1db21c45cc2e25e61626e495dd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_flash_attention_forward_ops.h>
+
+namespace at {
+
+
+// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cum_seq_q, const c10::optional<at::Tensor> & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale=c10::nullopt) {
+    return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cum_seq_q, const c10::optional<at::Tensor> & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale=c10::nullopt) {
+    return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale);
+  }
+}
+
+// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward_symint(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cum_seq_q, const c10::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale=c10::nullopt) {
+    return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cum_seq_q, const c10::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale=c10::nullopt) {
+    return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa8542ed9cd1e7c0f632a4dadc08d25cd09a0562
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div.h
@@ -0,0 +1,101 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_div_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_div_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_div_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_div__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_div_List::call(self, other);
+}
+
+// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+inline void _foreach_div_(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_div__List::call(self, other);
+}
+
+// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_div_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_div__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Tensor & other) {
+    return at::_ops::_foreach_div_Tensor::call(self, other);
+}
+
+// aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
+inline void _foreach_div_(at::TensorList self, const at::Tensor & other) {
+    return at::_ops::_foreach_div__Tensor::call(self, other);
+}
+
+// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_div_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_div_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_div_out(at::TensorList out, at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_div_List_out::call(self, other, out);
+}
+// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_div_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
+    return at::_ops::_foreach_div_List_out::call(self, other, out);
+}
+
+// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_div_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_div_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
+}
+
+// aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_div_out(at::TensorList out, at::TensorList self, const at::Tensor & other) {
+    return at::_ops::_foreach_div_Tensor_out::call(self, other, out);
+}
+// aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_div_outf(at::TensorList self, const at::Tensor & other, at::TensorList out) {
+    return at::_ops::_foreach_div_Tensor_out::call(self, other, out);
+}
+
+}
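The _foreach_div family above applies one division per list element in a single dispatch, with Scalar, Tensor[], Scalar[], and broadcast-Tensor right-hand sides plus in-place (_) and out variants. A minimal sketch of the in-place scalar form, assuming a vector of gradient tensors:

#include <ATen/ATen.h>
#include <vector>

// Divide every gradient tensor by a loss-scale factor in one foreach call.
void unscale(std::vector<at::Tensor>& grads, double loss_scale) {
  at::_foreach_div_(at::TensorList(grads), at::Scalar(loss_scale));
}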
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_expm1_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_expm1_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..23021887b559d20b51da2b961b21b210c14563c6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_expm1_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_expm1 {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_expm1")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_expm1(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};

+struct TORCH_API _foreach_expm1_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_expm1_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_expm1_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_expm1_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_expm1")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p.h
new file mode 100644
index 0000000000000000000000000000000000000000..235e4b417ce918e9a0fdc2aa04a8ab0978eadbc8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_log1p_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_log1p(at::TensorList self) {
+    return at::_ops::_foreach_log1p::call(self);
+}
+
+// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
+inline void _foreach_log1p_(at::TensorList self) {
+    return at::_ops::_foreach_log1p_::call(self);
+}
+
+// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_log1p_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_log1p_out::call(self, out);
+}
+// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_log1p_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_log1p_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e775c2b849617833fe1aa3e2eb85ff5840635cec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_sign_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_sign_slow(at::TensorList self);
+TORCH_API void foreach_tensor_sign_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_sign_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_sign_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..afe96eeba989ce8642609c38e61f7759141c4606
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lu_with_info.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lu_with_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb068a7acbae957a97c6678c2cde933e08fafe2f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lu_with_info.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_lu_with_info_ops.h>
+
+namespace at {
+
+
+// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info(const at::Tensor & self, bool pivot=true, bool check_errors=true) {
+    return at::_ops::_lu_with_info::call(self, pivot, check_errors);
+}
+
+}
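_lu_with_info returns the packed LU factorization, the pivot vector, and a LAPACK-style info tensor; with check_errors=false a singular input is reported through info rather than by throwing. A short sketch:

#include <ATen/ATen.h>

void lu_demo() {
  at::Tensor a = at::rand({4, 4});
  auto [lu, pivots, info] =
      at::_lu_with_info(a, /*pivot=*/true, /*check_errors=*/false);
  // For this single (unbatched) matrix, info.item<int>() == 0 means success.
}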
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f520c20339deaff2caf76b6d588aecf678f1d2c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _make_per_channel_quantized_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_and_nested_example_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_and_nested_example_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0510927a120e8a6353faf3fa05542c42510e6893
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_and_nested_example_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _nested_from_padded_and_nested_example_out(at::Tensor & out, const at::Tensor & padded, const at::Tensor & nt_example);
+TORCH_API at::Tensor & _nested_from_padded_and_nested_example_outf(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c09e7f54ba192b7b8694bdedc5b957c853951fc2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride);
+TORCH_API at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d98bc55c6105bff07b3ae9232de3ba1e3033d72e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..577ef30cc78b35c499f4148fcdfa6ceb5885daf9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _softmax {
+  using schema = at::Tensor (const at::Tensor &, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_softmax(Tensor self, int dim, bool half_to_float) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, bool half_to_float);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float);
+};
+
+struct TORCH_API _softmax_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+};
+
+}} // namespace at::_ops
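_softmax is the internal op behind torch.softmax; half_to_float=true requests an fp32 output from reduced-precision input for numerical stability. A sketch of the functional overload:

#include <ATen/ATen.h>

at::Tensor softmax_last_dim(const at::Tensor& logits) {
  // Softmax over the last dim, keeping the input dtype.
  return at::_softmax(logits, /*dim=*/-1, /*half_to_float=*/false);
}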
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa5c90658836e34877f10d42d3033b32cb373c71
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_bsc_tensor_unsafe {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_bsc_tensor_unsafe")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f465c6a58d2b8efa48c3aae1d3899a41bdd636ab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_csr_prod_dim_dtype {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csr_prod")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_dtype")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API _sparse_csr_prod_dim_dtype_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional<at::ScalarType>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csr_prod")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_dtype_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..133830a2ae390eb697fa6f6fd38f7db1d94dfd83
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _thnn_fused_gru_cell_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_thnn_fused_gru_cell_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias);
+};
+
+struct TORCH_API _thnn_fused_gru_cell_backward_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_thnn_fused_gru_cell_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..38aebe74da39c71450e2d32e09d97fd2849d35db
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _upsample_nearest_exact3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+};
+
+struct TORCH_API _upsample_nearest_exact3d_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..007d4c1b90af81317186dd13bedcc413d6367c69
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API adaptive_max_pool1d {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef output_size);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bc988555d4df387f3c54e96f577e88c87f38ac0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4dd3bfe5635f18a68b2a93117c1b0fb8fd2ad26
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API adaptive_max_pool3d_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+};
+
+struct TORCH_API adaptive_max_pool3d {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef output_size);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bd629c6294943c61b88ae48dbff5769e602cfb66
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API amin {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::amin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim);
+};
+
+struct TORCH_API amin_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::amin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
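amin reduces to the minimum over the given dims (an empty dim list means all dims), mirroring aten::amax. A short sketch:

#include <ATen/ATen.h>

at::Tensor row_min(const at::Tensor& x) {
  // Minimum over dim 1, kept as a size-1 dim so the result broadcasts back.
  return at::amin(x, /*dim=*/{1}, /*keepdim=*/true);
}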
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..82610c35ce32005cb24acd1af73a79242e3c0854
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor binary_cross_entropy_cpu(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_out_cpu(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out);
+TORCH_API at::Tensor binary_cross_entropy_cuda(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_out_cuda(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e71a78f732dd4df40e8ab80159460c224b910fec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cummin {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, int64_t dim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim);
+};
+
+struct TORCH_API cummin_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices);
+};
+
+struct TORCH_API cummin_dimname {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::Dimname);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::Dimname dim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim);
+};
+
+struct TORCH_API cummin_dimname_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Dimname, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices);
+};
+
+}} // namespace at::_ops
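cummin returns both the running minima and the index at which each minimum was attained; the dimname overloads accept a named dimension instead of an integer. Sketch:

#include <ATen/ATen.h>

void cummin_demo(const at::Tensor& x) {
  // values[i] is the min of x[0..i] along dim 0; indices says where it occurred.
  auto [values, indices] = at::cummin(x, /*dim=*/0);
}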
indices)") + static ::std::tuple call(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..46489889de9e731e487078f8ea0742bc58d12a24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & diagonal_copy_out(at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1); +TORCH_API at::Tensor & diagonal_copy_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1474453f1644deb4f2c42ebe05c3db4c7693e814 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor digamma(const at::Tensor & self); +TORCH_API at::Tensor & digamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & digamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & digamma_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/einsum_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/einsum_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bf977c9dd59ec4817ff74c2fcdb128f5430b77d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/einsum_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API einsum { + using schema = at::Tensor (c10::string_view, at::TensorList, at::OptionalIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::einsum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor") + static at::Tensor call(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b956816a6fd929ac1f7dc186ed08cc68d18a3a2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result); +TORCH_API at::Tensor & elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result); +TORCH_API at::Tensor & elu_backward_outf(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm.h new file mode 100644 index 0000000000000000000000000000000000000000..279dd0be2c937fd4eea5bc7d6446fb60e6eb5245 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) 
+inline at::Tensor & embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { + return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type); +} + +// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & embedding_renorm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { + return at::_ops::embedding_renorm_out::call(self, indices, max_norm, norm_type, out); +} +// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & embedding_renorm_outf(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) { + return at::_ops::embedding_renorm_out::call(self, indices, max_norm, norm_type, out); +} + +// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor +inline at::Tensor embedding_renorm(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { + return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..293a3ca8dfa79ec35921db7d257ab62fc0fe3499 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor +inline at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h new file mode 100644 index 0000000000000000000000000000000000000000..40e5fa0e276039a4214f1e6f404efccd99331827 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor empty_names(at::IntArrayRef size, c10::optional names, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor & empty_names_out(at::IntArrayRef size, c10::optional names, c10::optional memory_format, at::Tensor & out); +TORCH_API at::Tensor & empty_out(at::IntArrayRef size, c10::optional memory_format, at::Tensor & out); +TORCH_API at::Tensor empty_cpu(at::IntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional 
memory_format=c10::nullopt); +TORCH_API at::Tensor empty_cuda(at::IntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor empty_sparse(at::IntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor empty_sparse_compressed(at::IntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor empty_meta_symint(c10::SymIntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor empty_mkldnn(at::IntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor empty_unknown_quantized(at::IntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b6e2df918d750d8abca4d71f3809949694fbd3f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor exp(const at::Tensor & self); +TORCH_API at::Tensor & exp_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & exp_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & exp_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e3465a8e6f977747e7299f5f680d3293d52b7586 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
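+
+// [Editor's note] Usage sketch for the exp() CPU-dispatch declarations above
+// (exp_cpu_dispatch.h). A minimal, hypothetical example, not part of the
+// generated header, assuming a linked libtorch build:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor x = at::randn({4});
+//   at::Tensor y = at::exp(x);            // functional variant
+//   at::Tensor out = at::empty_like(x);
+//   at::exp_out(out, x);                  // writes into a preallocated tensor
+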
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fbgemm_linear_fp16_weight {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_linear_fp16_weight")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftshift_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftshift_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f6b519bab2d96dec2d29c807681731f361aec34
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftshift_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fft_ifftshift(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c42aa69f89b6fa837378d18924b66ae09f0a541d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
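+
+// [Editor's note] fmax (declared below) is the NaN-ignoring elementwise
+// maximum: where exactly one operand is NaN, the other is returned, mirroring
+// std::fmax. A minimal sketch, not part of the generated header; assumes a
+// linked libtorch and <cmath> for the NAN macro:
+//
+//   #include <ATen/ATen.h>
+//   #include <cmath>
+//   at::Tensor a = at::tensor({1.0, NAN});
+//   at::Tensor b = at::tensor({0.5, 2.0});
+//   at::Tensor m = at::fmax(a, b);   // -> {1.0, 2.0}
+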
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fmax { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmax(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API fmax_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..9920ae95de1a2a99860b3369526c6dae3ccf189c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool3d_output::call(self, kernel_size, output_size, random_samples, output, indices); +} +// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_output::call(self, kernel_size, output_size, random_samples, output, indices); +} + +// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) +inline ::std::tuple fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a79a92cec390de19ec2b126aa1dabc27bd717b0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gather_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gather") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) 
out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+};
+
+struct TORCH_API gather {
+  using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gather")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+};
+
+struct TORCH_API gather_dimname_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gather")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+};
+
+struct TORCH_API gather_dimname {
+  using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gather")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..67d83db26f9da694c4ce49025dfe38448a971d8d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
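+
+// [Editor's note] Usage sketch for at::gather (declared in gather_ops.h
+// above), not part of the generated header. Index values here are
+// illustrative; the index tensor must be of dtype kLong:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor src = at::arange(6, at::kFloat).reshape({2, 3});
+//   at::Tensor idx = at::zeros({2, 3}, at::kLong);      // pick column 0 everywhere
+//   at::Tensor out = at::gather(src, /*dim=*/1, idx);   // shape {2, 3}
+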
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_native.h new file mode 100644 index 0000000000000000000000000000000000000000..549f7df0770387ca194f9b337a05644090658c85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & isnan_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor isnan(const at::Tensor & self); +TORCH_API at::Tensor isnan_sparse(const at::Tensor & self); +TORCH_API at::Tensor isnan_sparse_csr(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e8877bf4f3df907b3ff15e55f0a75eaa0279f9d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
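+
+// [Editor's note] linalg_cholesky_ex (declared just below) returns the factor
+// together with an info tensor instead of throwing on a non-positive-definite
+// input. A hedged sketch, not part of the generated header:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor A = 2 * at::eye(3);                 // trivially SPD
+//   auto [L, info] = at::linalg_cholesky_ex(A);    // info == 0 on success
+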
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..175d42d9973e19b376104b3804796f04468beb27
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API logaddexp2_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logaddexp2")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API logaddexp2 {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logaddexp2")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logaddexp2(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..244f48cd663a39d1492af353b51a829c9c22712d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
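+
+// [Editor's note] logaddexp2 (declared in logaddexp2_ops.h above) computes
+// log2(2^x + 2^y) in a numerically stable way. A minimal sketch, not part of
+// the generated header:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor x = at::tensor({1.0, 8.0});
+//   at::Tensor y = at::tensor({1.0, 8.0});
+//   at::Tensor z = at::logaddexp2(x, y);   // -> {2.0, 9.0}
+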
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..04ea12298a75b2eef1df0122bbbdd74cb3dbdf69 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2c82029f0839c00fd1c2be32092ab83307a1a434 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
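+
+// [Editor's note] Sketch for max_pool2d_with_indices (declared above): it
+// returns both the pooled values and the flattened index of each maximum,
+// which max_unpool2d can consume. Hypothetical values, assuming a linked
+// libtorch; not part of the generated header:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor x = at::randn({1, 1, 4, 4});                      // NCHW input
+//   auto [vals, idx] = at::max_pool2d_with_indices(x, {2, 2});   // -> {1,1,2,2}
+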
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple mkldnn_linear_backward_weights_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined); +TORCH_API ::std::tuple mkldnn_linear_backward_weights_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0bae829e9ce8532c642233056505a7ed4f91d66a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple mkldnn_linear_backward_weights_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple mkldnn_linear_backward_weights(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0ce51e73b51cf25c46e75793d9556cba6b1370ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple mps_convolution_transpose_backward_out_symint(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..16bfb8250459a5c5e3ff861b3bba8743e2e1dff0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & multi_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional & weight, int64_t reduction, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ab2a4599a5e8d6f82b006669865531dd8d81ad58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_native.h @@ -0,0 +1,36 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional & p, at::ScalarType dtype); +TORCH_API at::Tensor & norm_ScalarOpt_dtype_out(const at::Tensor & self, const c10::optional & p, at::ScalarType dtype, at::Tensor & out); +TORCH_API at::Tensor norm(const at::Tensor & self, const at::Scalar & p=2); +TORCH_API at::Tensor & norm_Scalar_out(const at::Tensor & self, const at::Scalar & p, at::Tensor & out); +struct TORCH_API structured_norm_dtype_out : public at::meta::structured_norm_ScalarOpt_dim_dtype { +void impl(const at::Tensor & self, at::OptionalScalarRef p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, const at::Tensor & out); +}; +TORCH_API at::Tensor sparse_dtype_norm(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype); +struct TORCH_API structured_norm_out : public at::meta::structured_norm_ScalarOpt_dim { +void impl(const at::Tensor & self, at::OptionalScalarRef p, at::IntArrayRef dim, bool keepdim, const at::Tensor & out); +}; +TORCH_API at::Tensor sparse_norm(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype); +TORCH_API at::Tensor & norm_out(const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out); +TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim=false); +TORCH_API at::Tensor & norm_out(const at::Tensor & self, const c10::optional & p, at::DimnameList dim, bool keepdim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow.h 
b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow.h new file mode 100644 index 0000000000000000000000000000000000000000..ad1ac48e3282ae2b623d8601b43c40bffcad8b6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor_out::call(self, exponent, out); +} +// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Tensor_out::call(self, exponent, out); +} + +// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +inline at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor::call(self, exponent); +} + +// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar_out::call(self, exponent, out); +} +// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Scalar_out::call(self, exponent, out); +} + +// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor +inline at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar::call(self, exponent); +} + +// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar_out::call(self, exponent, out); +} +// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Scalar_out::call(self, exponent, out); +} + +// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +inline at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar::call(self, exponent); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_scales_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_scales_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..67e66f46107ed40f8ee5928043c750223438a708 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_scales_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
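+
+// [Editor's note] The pow.h wrappers above forward every overload to
+// at::_ops::pow_*::call; the *_out/_outf pairs differ only in argument order.
+// A minimal sketch of the Tensor_Scalar out-variant, not part of the
+// generated header:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor t = at::arange(4, at::kFloat);
+//   at::Tensor out = at::empty_like(t);
+//   at::pow_out(out, t, 2);   // -> {0, 1, 4, 9}
+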
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API q_per_channel_scales {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::q_per_channel_scales")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "q_per_channel_scales(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API q_per_channel_scales_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::q_per_channel_scales")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c069b6cb715b0082597f8781d194b4e592b6ef6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
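+
+// [Editor's note] q_per_channel_scales (declared above) is only valid on a
+// per-channel quantized tensor. A hedged sketch, not part of the generated
+// header; scales must be a double tensor and zero points a long tensor:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor w = at::rand({3, 2});
+//   at::Tensor scales = at::tensor({0.1, 0.2, 0.3});   // kDouble by default
+//   at::Tensor zps = at::zeros({3}, at::kLong);
+//   at::Tensor qw = at::quantize_per_channel(w, scales, zps, /*axis=*/0, at::kQInt8);
+//   at::Tensor s = at::q_per_channel_scales(qw);       // == scales
+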
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & randperm_out(at::Tensor & out, int64_t n, c10::optional generator); +TORCH_API at::Tensor & randperm_outf(int64_t n, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor & randperm_symint_out(at::Tensor & out, c10::SymInt n, c10::optional generator); +TORCH_API at::Tensor & randperm_symint_outf(c10::SymInt n, c10::optional generator, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7f2a43e8230cdaf3c79ae77207f60732bb46832a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor remainder(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & remainder_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a5395ed964a9709a66d32f4523c5a761fdf8cb75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
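+
+// [Editor's note] Sketch for the randperm out-variants above: the out tensor
+// supplies the dtype and device of the result. Hypothetical values, not part
+// of the generated header:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor out = at::empty({8}, at::kLong);
+//   at::randperm_out(out, 8);   // fills `out` with a permutation of 0..7
+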
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+TORCH_API at::Tensor & replication_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2fb60af30977bb554399da21cc72232950cba239
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & row_indices_copy_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & row_indices_copy_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c8ca18bbb3faa59670c89914618871d56e53b668
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor rsqrt(const at::Tensor & self); +TORCH_API at::Tensor & rsqrt_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & rsqrt_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & rsqrt_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/size_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/size_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b8a3fc58301808f83e630b80f3ebe252b0ea31d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/size_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API int64_t size(const at::Tensor & self, int64_t dim); +TORCH_API int64_t size(const at::Tensor & self, at::Dimname dim); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_scatter.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_scatter.h new file mode 100644 index 0000000000000000000000000000000000000000..08e58a347e86f6a50c79b79b3322d29b082fa9c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_scatter.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor +inline at::Tensor slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) { + return at::_ops::slice_scatter::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step); +} +namespace symint { + template ::value>> + at::Tensor slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) { + return at::_ops::slice_scatter::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step); + } +} + +// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor +inline at::Tensor slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) { + return at::_ops::slice_scatter::call(self, src, dim, start, end, step); +} +namespace symint { + template ::value>> + at::Tensor slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) { + return at::_ops::slice_scatter::call(self, src, dim, start, end, step); + } +} + +// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slice_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) { + return at::_ops::slice_scatter_out::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out); +} +namespace symint { + template ::value>> + at::Tensor & slice_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) { + return at::_ops::slice_scatter_out::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out); + } +} + +// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slice_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, int64_t step, at::Tensor & out) { + return at::_ops::slice_scatter_out::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out); +} +namespace symint { + template ::value>> + at::Tensor & slice_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, int64_t step, at::Tensor & out) { + return at::_ops::slice_scatter_out::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out); + } +} + +// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slice_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) { + return at::_ops::slice_scatter_out::call(self, src, dim, start, end, step, out); +} +namespace symint { + template ::value>> + at::Tensor & slice_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) { + return at::_ops::slice_scatter_out::call(self, src, dim, start, end, step, out); + } +} + +// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slice_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, at::Tensor & out) { + return at::_ops::slice_scatter_out::call(self, src, dim, start, end, step, out); +} +namespace symint { + template ::value>> + at::Tensor & slice_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, at::Tensor & out) { + return at::_ops::slice_scatter_out::call(self, src, dim, start, end, step, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..51c83f1b07faf194616e92efdf3fc717b85ffc84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & slow_conv_dilated3d_out_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out); +TORCH_API at::Tensor slow_conv_dilated3d_cpu(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor slow_conv_dilated3d_cuda(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d77cc0b1cf4783b5ff658b0c96f383f0c6e7f9d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
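+
+// [Editor's note] slice_scatter (declared in slice_scatter.h above) embeds
+// `src` into the slice self[start:end:step] along `dim` and returns a new
+// tensor; `src` must match the shape of that slice. A minimal sketch, not
+// part of the generated header:
+//
+//   #include <ATen/ATen.h>
+//   at::Tensor base = at::zeros({4, 4});
+//   at::Tensor src  = at::ones({2, 4});
+//   at::Tensor r = at::slice_scatter(base, src, /*dim=*/0, /*start=*/1, /*end=*/3);
+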
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d77cc0b1cf4783b5ff658b0c96f383f0c6e7f9d8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+TORCH_API at::Tensor & soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+TORCH_API at::Tensor & soft_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf974e1e223a6d7fca1209094ae140e718491dc2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options);
+TORCH_API at::Tensor sparse_coo_tensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor & sparse_coo_tensor_out(at::Tensor & out, at::IntArrayRef size);
+TORCH_API at::Tensor & sparse_coo_tensor_outf(at::IntArrayRef size, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
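[Editorial usage sketch — not part of the vendored headers. It uses the indices/values overload of the sparse COO factory whose schemas appear in the ops header that follows; values are illustrative.]

#include <torch/torch.h>

torch::Tensor sparse_demo() {
  // Nonzeros at (0,2), (1,0) and (1,2) of a 2x3 matrix: COO format stores a
  // 2 x nnz index matrix plus an nnz-long value vector.
  auto indices = torch::tensor({0, 1, 1, 2, 0, 2}).reshape({2, 3});
  auto values  = torch::tensor({3.0f, 4.0f, 5.0f});
  return torch::sparse_coo_tensor(indices, values, {2, 3});
}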
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5add164112795468bc6859f97f2d3ea7f9b153e7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API sparse_coo_tensor_size {
+  using schema = at::Tensor (at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_coo_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "size")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")
+  static at::Tensor call(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API sparse_coo_tensor_indices {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_coo_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "indices")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+};
+
+struct TORCH_API sparse_coo_tensor_indices_size {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_coo_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "indices_size")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+};
+
+struct TORCH_API sparse_coo_tensor_size_out {
+  using schema = at::Tensor & (at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_coo_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "size_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::IntArrayRef size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out);
+};
+
+}} // namespace at::_ops
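[Editorial sketch — not part of the vendored header. The at::_ops structs above are the unboxed entry points the generated at:: wrappers forward to: call() runs full dispatch, redispatch() re-enters with an explicit DispatchKeySet. Calling one directly, purely for illustration:]

#include <ATen/ATen.h>

at::Tensor ops_call_demo() {
  // Equivalent to the at::sparse_coo_tensor(size, ...) wrapper with only dtype set.
  return at::_ops::sparse_coo_tensor_size::call(
      {2, 3}, at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);
}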
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..4aeb2ac1e3984889b8808b62deedaae349686ccb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_bessel_y1 : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
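[Editorial usage sketch — not part of the vendored header. The meta struct above backs the structured kernel for aten::special_bessel_y1; user code reaches it through the ordinary functional form:]

#include <ATen/ATen.h>

at::Tensor bessel_demo() {
  // Bessel function of the second kind, order 1, applied elementwise.
  at::Tensor x = at::linspace(0.5, 10.0, 50);
  return at::special_bessel_y1(x);
}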
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9fb8b6964ddbe116c55bf04a174e76d808feb56b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_chebyshev_polynomial_w {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_chebyshev_polynomial_w_x_scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Scalar & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_chebyshev_polynomial_w_n_scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Scalar & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n);
+};
+
+struct TORCH_API special_chebyshev_polynomial_w_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_chebyshev_polynomial_w_x_scalar_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_chebyshev_polynomial_w_n_scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+};
+
+}} // namespace at::_ops
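[Editorial usage sketch — not part of the vendored header. The x_scalar/n_scalar overloads above let either argument be a plain at::Scalar; values are illustrative.]

#include <ATen/ATen.h>

at::Tensor chebyshev_demo() {
  at::Tensor x = at::linspace(-1.0, 1.0, 5);
  // Chebyshev polynomial of the fourth kind with fixed degree n=3 (n_scalar overload).
  return at::special_chebyshev_polynomial_w(x, at::Scalar(3));
}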
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b64eac2e58168dcc25814fa7eb617b4ffcc007f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor special_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes.h
new file mode 100644
index 0000000000000000000000000000000000000000..85b7ff2c5441ba13bbc45491977bada93b227c80
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/split_with_sizes_ops.h>
+
+namespace at {
+
+
+// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
+  }
+}
+
+// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes::call(self, split_sizes, dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes::call(self, split_sizes, dim);
+  }
+}
+
+}
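[Editorial usage sketch — not part of the vendored header. Per the schema above, the returned tensors alias self (Tensor(a)[]), and the split sizes must sum to self.size(dim).]

#include <ATen/ATen.h>
#include <vector>

std::vector<at::Tensor> split_demo() {
  at::Tensor t = at::arange(10);
  // Pieces of length 3, 3 and 4 along dim 0: 3 + 3 + 4 == t.size(0).
  return at::split_with_sizes(t, {3, 3, 4}, /*dim=*/0);
}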
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_size_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_size_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..44634d72c4541d512729ac492766758ccfc3c657
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_size_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API c10::SymInt sym_size(const at::Tensor & self, int64_t dim);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bde82795be209be5c4b55f2b4aa8f0631d1b1cc9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/thnn_conv2d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API thnn_conv2d_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::thnn_conv2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out);
+};
+
+struct TORCH_API thnn_conv2d {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::thnn_conv2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding);
+};
+
+}} // namespace at::_ops
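[Editorial usage sketch — not part of the vendored header. It assumes the generated value-returning wrapper at::thnn_conv2d, whose bias/stride/padding default as in the schema above; shapes are illustrative.]

#include <ATen/ATen.h>

at::Tensor thnn_conv2d_demo() {
  at::Tensor input  = at::randn({1, 3, 8, 8});   // (N, C_in, H, W)
  at::Tensor weight = at::randn({16, 3, 3, 3});  // (C_out, C_in, kH, kW)
  // bias=None, stride=1, padding=0 come from the schema defaults.
  return at::thnn_conv2d(input, weight, /*kernel_size=*/{3, 3});
}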
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triplet_margin_loss_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triplet_margin_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..382d559a3e3e091157fbdc58c81069cbab021091
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triplet_margin_loss_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin=1.0, double p=2, double eps=1e-06, bool swap=false, int64_t reduction=at::Reduction::Mean);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6bb219fa79714c4d70dfdadc90e1151c0ffe4a0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor upsample_bilinear2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
+TORCH_API at::Tensor upsample_bilinear2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
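[Editorial usage sketch — not part of the vendored header. This targets the "vec" overload declared above; the usual contract (assumed here) is that exactly one of output_size / scale_factors is provided. Explicit types are used to avoid ambiguity with the non-vec overload.]

#include <ATen/ATen.h>
#include <vector>

at::Tensor upsample_demo() {
  at::Tensor img = at::randn({1, 3, 4, 4});  // (N, C, H, W)
  std::vector<int64_t> out_hw = {8, 8};
  c10::optional<at::ArrayRef<double>> no_scales = c10::nullopt;
  return at::upsample_bilinear2d(img, at::OptionalIntArrayRef(at::IntArrayRef(out_hw)),
                                 /*align_corners=*/false, no_scales);
}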