diff --git a/ckpts/universal/global_step120/zero/12.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/12.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6f21df20eba7f744e1a455e634f19fe8af5ca748
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/12.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2236cdf0e56594c1fc581de10bf14767229ac68d429bee9aefbadf311ee9009
+size 33555533
diff --git a/ckpts/universal/global_step120/zero/13.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/13.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..41a67d6403a488903052d83e2707dbf7f34939db
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/13.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9a65e6d54ed834ccec82c742da5d00259dc2fc60cc09d0a6f870932b80b2137
+size 9372
diff --git a/ckpts/universal/global_step120/zero/13.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/13.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9c316a83a93938e022664e437dc25b917e084cc1
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/13.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca3eb1b6cb30da2eab57a87ee4089b6535ab9a8781b465f0e049cb4850220784
+size 9293
diff --git a/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3032f2c06c0511d79e5c607fcfb24787da349ebc
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de324d3cfd7977b6db43de74c212043f8efa612a76ac18953c4ad885d6b5c7ae
+size 33555612
diff --git a/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..61a1e5771448ca3a9e4ba8735e4bdbae2fdea154
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edc32b1a5fa6ca2b9513e740549aa27413274834e72b02d87ec12e648becda43
+size 33555533
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..14df73b867581d86492a36a9154747711d7d952f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _convert_weight_to_int4pack(const at::Tensor & self, int64_t innerKTiles); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c.h new file mode 100644 index 0000000000000000000000000000000000000000..c8c2382ed20bcf9be7df6c9da6e57b2a343d3560 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor +inline at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward); +} +namespace symint { + template ::value>> + at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward); + } +} + +// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor +inline at::Tensor _fft_c2c_symint(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c::call(self, dim, normalization, forward); +} +namespace symint { + template ::value>> + at::Tensor _fft_c2c(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c::call(self, dim, normalization, forward); + } +} + +// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c_out::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out); +} +namespace symint { + template ::value>> + at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c_out::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out); + } +} + +// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) { + return at::_ops::_fft_c2c_out::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out); +} +namespace symint { + template ::value>> + at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) { + return at::_ops::_fft_c2c_out::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out); + } +} + +// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _fft_c2c_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c_out::call(self, dim, normalization, forward, out); +} +namespace symint { + template ::value>> + at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) { + return at::_ops::_fft_c2c_out::call(self, dim, normalization, forward, out); + } +} + +// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _fft_c2c_symint_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) { + return at::_ops::_fft_c2c_out::call(self, dim, normalization, forward, out); +} +namespace symint { + template ::value>> + at::Tensor & _fft_c2c_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) { + return at::_ops::_fft_c2c_out::call(self, dim, normalization, forward, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a97528c8505429e4d9ef7d040bbc9a55196397c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_ops.h @@ -0,0 +1,149 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
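For orientation, a minimal hedged sketch of calling the _fft_c2c wrappers declared just above from code that links against libtorch. The input shape, the use of at::kComplexFloat, and the normalization values are illustrative assumptions, not part of the generated header; normalization is ATen's internal fft_norm_mode convention (0 = none, 1 = 1/sqrt(n), 2 = 1/n).

#include <ATen/ATen.h>

int main() {
  // 4x4 complex input; dims {0, 1} transform both axes.
  at::Tensor x = at::randn({4, 4}, at::kComplexFloat);
  at::Tensor fwd = at::_fft_c2c(x, {0, 1}, /*normalization=*/0, /*forward=*/true);
  // Inverse transform with 1/n scaling recovers the input up to rounding error.
  at::Tensor rt = at::_fft_c2c(fwd, {0, 1}, /*normalization=*/2, /*forward=*/false);
  return 0;
}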
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _foreach_div_Scalar { + using schema = ::std::vector (at::TensorList, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]") + static ::std::vector call(at::TensorList self, const at::Scalar & scalar); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar); +}; + +struct TORCH_API _foreach_div__Scalar { + using schema = void (at::TensorList, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()") + static void call(at::TensorList self, const at::Scalar & scalar); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar); +}; + +struct TORCH_API _foreach_div_List { + using schema = ::std::vector (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]") + static ::std::vector call(at::TensorList self, at::TensorList other); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other); +}; + +struct TORCH_API _foreach_div__List { + using schema = void (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()") + static void call(at::TensorList self, at::TensorList other); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other); +}; + +struct TORCH_API _foreach_div_ScalarList { + using schema = ::std::vector (at::TensorList, at::ArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]") + static ::std::vector call(at::TensorList self, at::ArrayRef scalars); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars); +}; + +struct TORCH_API _foreach_div__ScalarList { + using schema = void (at::TensorList, at::ArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div_") + 
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()") + static void call(at::TensorList self, at::ArrayRef scalars); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars); +}; + +struct TORCH_API _foreach_div_Tensor { + using schema = ::std::vector (at::TensorList, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]") + static ::std::vector call(at::TensorList self, const at::Tensor & other); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other); +}; + +struct TORCH_API _foreach_div__Tensor { + using schema = void (at::TensorList, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()") + static void call(at::TensorList self, const at::Tensor & other); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other); +}; + +struct TORCH_API _foreach_div_Scalar_out { + using schema = void (at::TensorList, const at::Scalar &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, const at::Scalar & scalar, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out); +}; + +struct TORCH_API _foreach_div_List_out { + using schema = void (at::TensorList, at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList other, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out); +}; + +struct TORCH_API _foreach_div_ScalarList_out { + using schema = void (at::TensorList, at::ArrayRef, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.ScalarList_out(Tensor[] self, Scalar[] 
scalars, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::ArrayRef scalars, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef scalars, at::TensorList out); +}; + +struct TORCH_API _foreach_div_Tensor_out { + using schema = void (at::TensorList, const at::Tensor &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, const at::Tensor & other, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..24eba928cd953e16fc3a8f3d1a5ee564dff491f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _foreach_neg { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_neg") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_neg(Tensor[] self) -> Tensor[]") + static ::std::vector call(at::TensorList self); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_neg_ { + using schema = void (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_neg_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_neg_(Tensor(a!)[] self) -> ()") + static void call(at::TensorList self); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_neg_out { + using schema = void (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_neg") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out); +}; + +}} // namespace at::_ops diff --git 
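As a rough usage sketch (not part of the headers themselves), the _foreach_div and _foreach_neg_ schemas above correspond to at:: wrappers generated alongside these Operator.h files that operate on a whole list of tensors at once; the tensor values below are made up for illustration.

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> params = {at::ones({2, 2}), at::ones({3})};
  // Out-of-place Scalar overload: every tensor in the list is divided by 2.
  std::vector<at::Tensor> halved = at::_foreach_div(params, 2.0);
  // In-place variant (trailing underscore): negates each tensor in params.
  at::_foreach_neg_(params);
  return 0;
}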
a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..57a2cd5c1377ee2f80dfb9811f9d291045dc0bdd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void _foreach_reciprocal_out(at::TensorList self, at::TensorList out); +TORCH_API ::std::vector foreach_tensor_reciprocal_slow(at::TensorList self); +TORCH_API void foreach_tensor_reciprocal_slow_(at::TensorList self); +TORCH_API ::std::vector foreach_tensor_reciprocal_cuda(at::TensorList self); +TORCH_API void foreach_tensor_reciprocal_cuda_(at::TensorList self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..869e332c1ad1572a531200d0c0357919ccd74972 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) +inline ::std::tuple _fused_moving_avg_obs_fq_helper(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::_fused_moving_avg_obs_fq_helper::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); +} + +// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) 
out1) -> (Tensor(e!), Tensor(f!)) +inline ::std::tuple _fused_moving_avg_obs_fq_helper_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::_fused_moving_avg_obs_fq_helper_out::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1); +} +// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!)) +inline ::std::tuple _fused_moving_avg_obs_fq_helper_outf(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_fused_moving_avg_obs_fq_helper_out::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1); +} + +// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out) +inline ::std::tuple _fused_moving_avg_obs_fq_helper_functional(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3b88038a894ffdd320f80a82157573ad7a4f8de4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API int64_t _fused_sdp_choice_cpp(const at::Tensor & query, const at::Tensor & key, const at::Tensor & 
value, const c10::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt); +TORCH_API int64_t _fused_sdp_choice_cuda(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt); +TORCH_API int64_t _fused_sdp_choice_meta(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0e3a56b4da9dbd935545f14c0c75439ff2e24528 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool _is_zerotensor(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token.h new file mode 100644 index 0000000000000000000000000000000000000000..344d2abcdbbd389b19c52a414018097d5bf2a914 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token.h @@ -0,0 +1,34 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor _make_dep_token(at::TensorOptions options={}, c10::optional memory_format=c10::nullopt) { + return at::_ops::_make_dep_token::call(c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); +} +// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor +inline at::Tensor _make_dep_token(c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format) { + return at::_ops::_make_dep_token::call(dtype, layout, device, pin_memory, memory_format); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..35797d98e6910468be914b727050c5fce3a5113a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_enum_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_enum_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8e8d53679fbd9b1972a308481393a306c27b6092 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_enum_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _pad_enum_symint(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional value=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b01f32fb5305c6461636dff8ba4aac97a0ec350 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _scaled_mm(const at::Tensor & self, const at::Tensor & mat2, const c10::optional & bias={}, c10::optional out_dtype=c10::nullopt, const c10::optional & scale_a={}, const c10::optional & scale_b={}, const c10::optional & scale_result={}, bool use_fast_accum=false); +TORCH_API ::std::tuple _scaled_mm_out(at::Tensor & out, at::Tensor & out_amax, const at::Tensor & self, const at::Tensor & mat2, const c10::optional & bias={}, c10::optional out_dtype=c10::nullopt, const c10::optional & scale_a={}, const c10::optional & scale_b={}, const c10::optional & scale_result={}, bool use_fast_accum=false); +TORCH_API ::std::tuple _scaled_mm_outf(const at::Tensor & self, const at::Tensor & mat2, const c10::optional & bias, c10::optional out_dtype, const c10::optional & scale_a, const c10::optional & scale_b, const c10::optional & scale_result, bool use_fast_accum, at::Tensor & out, at::Tensor & out_amax); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_shape_as_tensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_shape_as_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..ad9ff8f05257b38c56897e73a1b722de5c5388c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_shape_as_tensor.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_shape_as_tensor(Tensor self) -> Tensor +inline at::Tensor _shape_as_tensor(const at::Tensor & self) { + return at::_ops::_shape_as_tensor::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..67cfde367693393d9872bb36fb3b9a19d2cb135a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor +inline at::Tensor _sparse_broadcast_to_copy(const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_sparse_broadcast_to_copy::call(self, size); +} + +// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sparse_broadcast_to_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_sparse_broadcast_to_copy_out::call(self, size, out); +} +// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _sparse_broadcast_to_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_sparse_broadcast_to_copy_out::call(self, size, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ed58e4095abf8ec8c648672af915bc4bb8867bc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional is_coalesced=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..12344f8e996d845a269b83e45926aef4b540b189 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _sparse_csc_tensor_unsafe { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csc_tensor_unsafe") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor") + static at::Tensor call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2433411da053e76d8351ce47dedd6d46bebe8786 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _test_optional_floatlist(const at::Tensor & values, c10::optional> addends); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..55dab0a1dd288eacbd8a6df857734a573bbcdbb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple _thnn_fused_lstm_cell_backward(const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..37896513bdea080fb91181de9432586330c07be7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _thnn_fused_lstm_cell_backward { + using schema = ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_thnn_fused_lstm_cell_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)") + static ::std::tuple call(const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dfca440d76738771364beb06ff6c9eab75ce784c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales, at::Tensor & out); +TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..97a07a59888e7fe4d2e02c72980a92249d7b6b03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API void _validate_compressed_sparse_indices(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/absolute_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/absolute_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e42fb8193b250136b6f6b5b96c95ac7a0a1b7834 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/absolute_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
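A hedged sketch of calling the CPU dispatch entry point for _upsample_nearest_exact1d declared above directly; calling the at::cpu:: function bypasses the dispatcher, so it is only appropriate for CPU tensors. The 1x1x4 input and output length 8 are illustrative.

#include <ATen/ATen.h>
#include <ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h>

int main() {
  // (N, C, L) = (1, 1, 4) float input, upsampled to length 8 with nearest-exact rounding.
  at::Tensor x = at::arange(4, at::kFloat).reshape({1, 1, 4});
  at::Tensor up = at::cpu::_upsample_nearest_exact1d(x, {8});
  return 0;
}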
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor absolute(const at::Tensor & self); +TORCH_API at::Tensor & absolute_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & absolute_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & absolute_(at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..93b04891e650fa468be661ac36820ce3caa9f141 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh.h new file mode 100644 index 0000000000000000000000000000000000000000..6158f557a02ce79dbc07b585c9592faa7959dc0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::arcsinh(Tensor self) -> Tensor +inline at::Tensor arcsinh(const at::Tensor & self) { + return at::_ops::arcsinh::call(self); +} + +// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & arcsinh_(at::Tensor & self) { + return at::_ops::arcsinh_::call(self); +} + +// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & arcsinh_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::arcsinh_out::call(self, out); +} +// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & arcsinh_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::arcsinh_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b10f365d03b774d0904a0defab63a07f693f1090 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor arctan2(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & arctan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & arctan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & arctan2_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..44e3ec7c5c7848bdc2e93db75911380a1a65c102 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
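To make the arcsinh declarations above concrete, a short hedged sketch of the three calling conventions (functional, out=, and in-place); the input tensor is illustrative.

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({3});
  at::Tensor y = at::arcsinh(x);        // functional form, allocates the result
  at::Tensor out = at::empty_like(x);
  at::arcsinh_out(out, x);              // writes into a preallocated output tensor
  x.arcsinh_();                         // in-place method variant on Tensor
  return 0;
}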
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor asinh(const at::Tensor & self); +TORCH_API at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & asinh_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_3d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..789559bffc0dec9036553ed376d9fcc2a218f9ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_3d_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor atleast_3d(const at::Tensor & self); +TORCH_API ::std::vector atleast_3d(at::TensorList tensors); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..950aa3a2203e73e66f115bc5def1021d4f53ca36 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + return at::_ops::avg_pool3d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); +} +// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & grad_input) { + return at::_ops::avg_pool3d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); +} + +// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? 
divisor_override) -> Tensor +inline at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { + return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_embedding_loss_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_embedding_loss_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4d0ba4dfe25b56ffb9194baa860da847561502a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_embedding_loss_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0e78ed19fef37909b2a0d7b5f87fecacdf735909 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
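A brief, hedged sketch of the general at::cosine_embedding_loss wrapper that routes to the composite implementation declared above; the batch of 4 pairs and the all-ones target (meaning the pairs should be similar) are illustrative assumptions.

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::randn({4, 8});
  at::Tensor b = at::randn({4, 8});
  at::Tensor target = at::ones({4});  // +1 marks similar pairs, -1 dissimilar ones
  at::Tensor loss = at::cosine_embedding_loss(a, b, target, /*margin=*/0.0,
                                              at::Reduction::Mean);
  return 0;
}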
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1); +TORCH_API at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4c46a3a0d4adf99f198377d053e280143b60c8f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API digamma_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::digamma_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "digamma_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API digamma_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::digamma") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "digamma.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API digamma { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::digamma") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "digamma(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand.h new file mode 100644 index 0000000000000000000000000000000000000000..cc066dac3efe1cba6e32c20a8ec8a28948229574 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +namespace symint { + template ::value>> + at::Tensor expand(const at::Tensor & self, at::IntArrayRef size, bool implicit=false) { + return at::_ops::expand::call(self, c10::fromIntArrayRefSlow(size), implicit); + } +} + +namespace symint { + template ::value>> + at::Tensor expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) { + return at::_ops::expand::call(self, size, implicit); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft2.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft2.h new file mode 100644 index 0000000000000000000000000000000000000000..01ea4f3af76652be2b9f1e8b7967cb4797ce5174 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft2.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +inline at::Tensor fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } +} + +// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor +inline at::Tensor fft_ihfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2::call(self, s, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_ihfft2(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2::call(self, s, dim, norm); + } +} + +// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & fft_ihfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + const at::Tensor & fft_ihfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & fft_ihfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_ihfft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + const at::Tensor & fft_ihfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_ihfft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & fft_ihfft2_symint_out(const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + const at::Tensor & fft_ihfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft2_out::call(self, s, dim, norm, out); + } +} + +// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline const at::Tensor & fft_ihfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_ihfft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + const at::Tensor & fft_ihfft2_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out) { + return at::_ops::fft_ihfft2_out::call(self, s, dim, norm, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf3f261dcd2c9e65f308e42572ccfe7977b7dcdc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d.h new file mode 100644 index 0000000000000000000000000000000000000000..440d6525df8ad6af37c9b090075bb26fca50218a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor +inline at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners); +} + +// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & grid_sampler_2d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_2d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out); +} +// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & grid_sampler_2d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { + return at::_ops::grid_sampler_2d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window.h new file mode 100644 index 0000000000000000000000000000000000000000..947b6012b1365854d2d55d28ba88882be9a213ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window.h @@ -0,0 +1,97 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::hamming_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hamming_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hamming_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::hamming_window_out::call(window_length, out); +} +// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::hamming_window_out::call(window_length, out); +} + +// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out); +} +// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out); +} + +// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha) { + return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out); +} +// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out) { + return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out); +} + +// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta) { + return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out); +} +// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) { + return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f6a35eabdf4b0273ee49d686ae0fa2620639b073 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API igamma_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igamma") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API igamma { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igamma") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igamma(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API igamma_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igamma_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igamma_(Tensor(a!) 
self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..360e209a38b494a62fb6dedff9bfab1ccadeda6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_conj(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..66e4b0201fd7644127c7d49913e00f3048d0291e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isnan { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isnan") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isnan(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API isnan_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isnan") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isnan.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3a9e04cdcdbb28e8e9554372af1631915e1cd7ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API kaiser_window { + using schema = at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API kaiser_window_periodic { + using schema = at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API kaiser_window_beta { + using schema = at::Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "beta") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, bool periodic, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API kaiser_window_out { + using schema = at::Tensor & (int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(int64_t window_length, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out); +}; + +struct TORCH_API kaiser_window_periodic_out { + using schema = at::Tensor & (int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out); +}; + +struct TORCH_API kaiser_window_beta_out { + using schema = at::Tensor & (int64_t, bool, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "beta_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(int64_t window_length, bool periodic, double beta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d6ab0718ad722452ddd7be8d0277a9e2f490193d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau); +TORCH_API at::Tensor & linalg_householder_product_out(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular.h new file mode 100644 index 0000000000000000000000000000000000000000..6c9c57d1534f1a4020703f74b94b335316e21e89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) { + return at::_ops::linalg_solve_triangular_out::call(self, B, upper, left, unitriangular, out); +} +// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) { + return at::_ops::linalg_solve_triangular_out::call(self, B, upper, left, unitriangular, out); +} + +// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor +inline at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) { + return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a1159b5c8ad75de8f4e9f98d3ccadb49b490b483 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional eps=c10::nullopt); +TORCH_API at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::optional eps=c10::nullopt); +TORCH_API at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::optional eps, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bd30f1815b66f275c817ce71a9375f6d87d80c7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & logit_(at::Tensor & self, c10::optional eps=c10::nullopt); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cc97d901e679b75ff979c0b1f6b4710c33a29c91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..567d20779158cf2ecd6ba26885c85801be37f56e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..44834f2e452abfa5b81d3eb6cd9a135b1d49ecfe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); +} +namespace symint { + template ::value>> + at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); + } +} + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); +} +namespace symint { + template ::value>> + at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); + } +} + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out); +} +namespace symint { + template ::value>> + at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out); + } +} + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & max_unpool3d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out); +} +namespace symint { + template ::value>> + at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out); + } +} + +// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor +inline at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding); +} +namespace symint { + template ::value>> + at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding); + } +} + +// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor +inline at::Tensor max_unpool3d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding); +} +namespace symint { + template ::value>> + at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a55e796448f13815f56d76661d23d5dd429fd195 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor mish_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..99acafa87fbd766cd2e3238da338cbb097ba60ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mish_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mish_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mish_backward(Tensor grad_output, Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..61244fc51a5b06a0f955b01f8da4d29f9d2d0095 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API mkldnn_max_pool3d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_max_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +struct TORCH_API mkldnn_max_pool3d_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_max_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nansum.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nansum.h new file mode 100644 index 0000000000000000000000000000000000000000..39681928979d4d6c8c4389e44bd2ad38098cfbc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nansum.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor nansum(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::nansum::call(self, dim, keepdim, dtype); +} + +// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nansum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::nansum_out::call(self, dim, keepdim, dtype, out); +} +// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & nansum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::nansum_out::call(self, dim, keepdim, dtype, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5eec61a5d0901e7005732bd22167ec529da433d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length); +TORCH_API at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); +TORCH_API at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length); +TORCH_API at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out); +TORCH_API at::Tensor & narrow_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); +TORCH_API at::Tensor & narrow_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_dropout_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_dropout_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..87ec256922fa90e5bfaae39f3a3c0f6c151b3ce3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_dropout_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple native_dropout(const at::Tensor & input, double p, c10::optional train); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d23d9963d4dc909ba091a20a941581cc56b2c70e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor neg(const at::Tensor & self); +TORCH_API at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & neg_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a49cea555f322ad11dbe5ac72db7a173d446f07e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype); +TORCH_API at::Tensor quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype); +TORCH_API ::std::vector quantize_per_tensor(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bf2635577eb4f024527e298639d848ed21f38437 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype); +TORCH_API at::Tensor quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand.h new file mode 100644 index 0000000000000000000000000000000000000000..e01f0fd64c0619ee6eb21db74bd89e42688603e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand.h @@ -0,0 +1,377 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); + } +} + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_names::call(size, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_names::call(size, names, dtype, layout, device, pin_memory); + } +} + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); + } +} + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory); + } +} + +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } +} + +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand::call(size, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand::call(size, dtype, layout, device, pin_memory); + } +} + +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } +} + +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator::call(size, generator, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::rand_generator::call(size, generator, dtype, layout, device, pin_memory); + } +} + +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template ::value>> + at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template ::value>> + at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::rand_out::call(size, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::rand_out::call(size, out); + } +} + +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::call(size, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::call(size, out); + } +} + +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator) { + return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator) { + return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_outf(at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_outf(at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator) { + return at::_ops::rand_generator_out::call(size, generator, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator) { + return at::_ops::rand_generator_out::call(size, generator, out); + } +} + +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::rand_generator_out::call(size, generator, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_outf(c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::rand_generator_out::call(size, generator, out); + } +} + +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional names) { + return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional names) { + return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out); + } +} + +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_outf(at::IntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_outf(at::IntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out); + } +} + +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional names) { + return at::_ops::rand_names_out::call(size, names, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional names) { + return at::_ops::rand_names_out::call(size, names, out); + } +} + +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::rand_names_out::call(size, names, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_outf(c10::SymIntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::rand_names_out::call(size, names, out); + } +} + +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); + } +} + +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_outf(at::IntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_outf(at::IntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); + } +} + +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::rand_generator_with_names_out::call(size, generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::rand_generator_with_names_out::call(size, generator, names, out); + } +} + +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::call(size, generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & rand_outf(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::call(size, generator, names, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..1176edc2208a7ef30b2265f30f8e60a194fdc232 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_scatter_reduce_two : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d9c8bfcdedc007857a313bdcd3cbccc003cf7842 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor sgn(const at::Tensor & self); +TORCH_API at::Tensor & sgn_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sin_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sin_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..54d727a1d18acd764c2c23145315de040eb7c75e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sin_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API sin { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sin(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API sin_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sin_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sin_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API sin_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d.h new file mode 100644 index 0000000000000000000000000000000000000000..3d28d32af29852f46ef85857023fc741248b31bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); + } +} + +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); + } +} + +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out); + } +} + +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out); + } +} + +// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor +inline at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding)); +} +namespace symint { + template ::value>> + at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding)); + } +} + +// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor +inline at::Tensor slow_conv3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding); +} +namespace symint { + template ::value>> + at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sort_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sort_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..1ff1cb8a160ade0070e82bddb33e1cfe2673276d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sort_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_sort_stable : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, c10::optional stable, int64_t dim, bool descending); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c689d9db83a82ee0b47318376796e6ef7377278d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +TORCH_API at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype={}, c10::optional 
layout={}, c10::optional device={}, c10::optional pin_memory={}); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a942097b4f90295027b67145b06d4207cc292794 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor special_expit(const at::Tensor & self); +TORCH_API at::Tensor & special_expit_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i1.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i1.h new file mode 100644 index 0000000000000000000000000000000000000000..3a2ef63a79caf7b561ccae356322fa807cf0a16d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i1.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_modified_bessel_i1(Tensor self) -> Tensor +inline at::Tensor special_modified_bessel_i1(const at::Tensor & self) { + return at::_ops::special_modified_bessel_i1::call(self); +} + +// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_modified_bessel_i1_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_modified_bessel_i1_out::call(self, out); +} +// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & special_modified_bessel_i1_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_modified_bessel_i1_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v.h new file mode 100644 index 0000000000000000000000000000000000000000..079cd6c4b33e785b85c60e5f4f77323642b85627 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor +inline at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n); +} + +// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor +inline at::Tensor special_shifted_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n); +} + +// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor +inline at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n); +} + +// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_out::call(x, n, out); +} +// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_out::call(x, n, out); +} + +// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::call(x, n, out); +} +// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::call(x, n, out); +} + +// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x, n, out); +} +// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x, n, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6b9c948b04b9f25d6627e1f3e5d35b06790cefb9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_sinc_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_sinc_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0edd1b6e812d5e9455fadb7b106ef36922f0e5b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_sinc_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor special_sinc(const at::Tensor & self); +TORCH_API at::Tensor & special_sinc_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5c94a015ce795db8c13fcc9e603a7022da869738 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple std_mean(const at::Tensor & self, bool unbiased=true); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false); +TORCH_API ::std::tuple std_mean_correction_out(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, 
at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f58d88b459f69ca5b4e0ac0efc56706876b04d0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API tensor_split_sections { + using schema = ::std::vector (const at::Tensor &, c10::SymInt, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::tensor_split") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "sections") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]") + static ::std::vector call(const at::Tensor & self, c10::SymInt sections, int64_t dim); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt sections, int64_t dim); +}; + +struct TORCH_API tensor_split_indices { + using schema = ::std::vector (const at::Tensor &, c10::SymIntArrayRef, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::tensor_split") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]") + static ::std::vector call(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim); +}; + +struct TORCH_API tensor_split_tensor_indices_or_sections { + using schema = ::std::vector (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::tensor_split") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_indices_or_sections") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]") + static ::std::vector call(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim); +}; + +}} // namespace at::_ops diff --git 
a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f21ec7f55aa70ca94e6c8c20ed185e905a06de24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold); +TORCH_API at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold); +TORCH_API at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8445a7e500683a99d64d60e761dd0d963b18e43f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); +TORCH_API at::Tensor & threshold_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); +TORCH_API at::Tensor & threshold_outf(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3817b537a2560f73e3f98a2033ee04ad2a3f76ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..82bad716832635cab9d008b96434276be305a0aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API to_sparse_bsc { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::to_sparse_bsc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "to_sparse_bsc(Tensor self, int[2] blocksize, int? 
dense_dim=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..3c1dc571ee89da6e1136a732b135cf8ea1de89d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input); + } +} + +// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input); + } +} + +// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & upsample_nearest2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input); + } +} + +// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & upsample_nearest2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input); + } +} + +// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor +inline at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w); +} +namespace symint { + template ::value>> + at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w); + } +} + +// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? 
scales_w=None) -> Tensor +inline at::Tensor upsample_nearest2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w); +} +namespace symint { + template ::value>> + at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w); + } +} + +}
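
Note (not part of the diff): the generated Function.h headers added above, such as ATen/ops/rand.h and ATen/ops/upsample_nearest2d_backward.h, only declare thin inline wrappers that forward to the corresponding at::_ops::*::call entry points. As a minimal usage sketch, assuming a standard libtorch build in which <ATen/ATen.h> aggregates these per-operator headers, the declarations can be exercised from client code roughly as follows; the file and variable names here are illustrative only.

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // aten::rand(SymInt[] size, *, ScalarType? dtype=None, ...) -> Tensor:
  // the TensorOptions overload forwards to at::_ops::rand::call.
  at::Tensor x = at::rand({2, 3}, at::TensorOptions().dtype(at::kFloat));

  // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!):
  // the *_out variant fills a preallocated tensor and returns it by reference.
  at::Tensor out = at::empty({2, 3});
  at::rand_out(out, {2, 3});

  std::cout << x.sizes() << " " << out.sizes() << std::endl;
  return 0;
}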