diff --git a/ckpts/universal/global_step20/zero/11.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/11.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e196b247ca2f2a3f3191f64271702ce99c2b25f
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/11.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e70afa67165b81d7ae22f4e960f71620fd8a09703ffc4eebbce37bf93519b898
+size 16778317
diff --git a/ckpts/universal/global_step20/zero/21.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/21.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e989b2c9862ee9fc8460a2d08805640cc10f54bf
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/21.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c53bd697140c568792ca705be8c039e6c17bfa5f60e6a8b38dc860841afcac96
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/21.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/21.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dc26e412b10ace468ea90549800e69cd223b4d26
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/21.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db3dcd2f112443928798ee75085103d067a6d9f727962f9b6f1f9971c4392521
+size 16778317
diff --git a/ckpts/universal/global_step20/zero/22.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/22.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2ab2138c4c5d229bb3da61b16320510a7f6270f8
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f23b19201d0178505e723c70f6ba0682ab954a3c9bed7e769fb04ed41d0cc859
+size 9387
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_tensor_metadata_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_tensor_metadata_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..09e5d5508a6d248f87e875070718524aef82ef02
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_tensor_metadata_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _assert_tensor_metadata {
+  using schema = void (const at::Tensor &, at::OptionalSymIntArrayRef, at::OptionalSymIntArrayRef, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_assert_tensor_metadata")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None) -> ()")
+  static void call(const at::Tensor & a, at::OptionalSymIntArrayRef size, at::OptionalSymIntArrayRef stride, c10::optional<at::ScalarType> dtype);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & a, at::OptionalSymIntArrayRef size, at::OptionalSymIntArrayRef stride, c10::optional<at::ScalarType> dtype);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4f3b86ade0e60b6c8c72439ed3a39d6be0710e4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cdist_backward_ops.h>
+
+namespace at {
+
+
+// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
+inline at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
+    return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist);
+}
+
+// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _cdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
+    return at::_ops::_cdist_backward_out::call(grad, x1, x2, p, cdist, out);
+}
+// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _cdist_backward_outf(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
+    return at::_ops::_cdist_backward_out::call(grad, x1, x2, p, cdist, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c594adb91a3404f3e1d3fb6c8f25a416a4657ce3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_ops.h
@@ -0,0 +1,116 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_clamp_max_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & scalar);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_clamp_max__Scalar {
+  using schema = void (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_clamp_max_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList other);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other);
+};
+
+struct TORCH_API _foreach_clamp_max__List {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()")
+  static void call(at::TensorList self, at::TensorList other);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other);
+};
+
+struct TORCH_API _foreach_clamp_max_ScalarList {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_clamp_max__ScalarList {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_clamp_max_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+};
+
+struct TORCH_API _foreach_clamp_max_List_out {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList other, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out);
+};
+
+struct TORCH_API _foreach_clamp_max_ScalarList_out {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_clamp_max")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ffb3740521948cb9132945021204345427ef1b42
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_trunc(at::TensorList self);
+TORCH_API void _foreach_trunc_(at::TensorList self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..82fd19c70f022d615634b61487af8e957aaa0968
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa91848397008a42ce309455a1625c7c52110e89
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _fw_primal(const at::Tensor & self, int64_t level);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true.h
new file mode 100644
index 0000000000000000000000000000000000000000..d40e5c3748f80ccd978279a6a81be09dd7b7d9be
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_is_any_true_ops.h>
+
+namespace at {
+
+
+// aten::_is_any_true(Tensor self) -> Tensor
+inline at::Tensor _is_any_true(const at::Tensor & self) {
+    return at::_ops::_is_any_true::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a445539e587c8fdb618afa159222015bb50eb9f1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbabac5dfe46cdee5b8ba4d42c12609e80afcfa5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _make_dep_token {
+  using schema = at::Tensor (c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, c10::optional<at::MemoryFormat>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_make_dep_token")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")
+  static at::Tensor call(c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..db3bcec2901141a19dc6f51f6e7267cdd2220528
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _make_dual_copy_out(at::Tensor & out, const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
+TORCH_API at::Tensor & _make_dual_copy_outf(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..df11b0092912751e633872f750ccccef3f7ea6e5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_masked_softmax_backward_ops.h>
+
+namespace at {
+
+
+// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
+inline at::Tensor _masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt) {
+    return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim);
+}
+
+// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _masked_softmax_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt) {
+    return at::_ops::_masked_softmax_backward_out::call(grad_output, output, mask, dim, out);
+}
+// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _masked_softmax_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) {
+    return at::_ops::_masked_softmax_backward_out::call(grad_output, output, mask, dim, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3dcbe996a58c09c9e9f2cb313e03a7b1c6210884
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _mkldnn_transpose {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_transpose")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim0, int64_t dim1);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1);
+};
+
+struct TORCH_API _mkldnn_transpose_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_transpose_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t dim0, int64_t dim1);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1);
+};
+
+struct TORCH_API _mkldnn_transpose_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_transpose")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7494182c93d04456e963a3f89f6fd7d0e81b4e37
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c57d15583a1550f06108392e23fab57b247d346
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h>
+
+namespace at {
+
+
+// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
+inline at::Tensor _test_autograd_multiple_dispatch_view(const at::Tensor & self) {
+    return at::_ops::_test_autograd_multiple_dispatch_view::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8761374045dc87b24f0b0c9694582365c29ebd9a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _test_functorch_fallback_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor _test_functorch_fallback(const at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..724b43957443d9ffff9c916d8903562934646956
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int4pack_mm_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int4pack_mm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b66c1efd1dc5342716d4aff2d6dbaa37a098c03
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int4pack_mm_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _weight_int4pack_mm(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ac33b545328d8f208236ff3a749a347d32a2fa7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _weight_int8pack_mm(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..522b8be9f491f9cee5a8ebe81fde98ae339a7576
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim=0);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca344b5ebe18466e734d683f09dda1cf55b87576
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+TORCH_API at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+TORCH_API at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add.h
new file mode 100644
index 0000000000000000000000000000000000000000..80a6df08bf53e66e67c2b5ddbbb72fe24b7781e2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/add_ops.h>
+
+namespace at {
+
+
+// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+inline at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::add_Tensor::call(self, other, alpha);
+}
+
+// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::add_out::call(self, other, alpha, out);
+}
+// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::add_out::call(self, other, alpha, out);
+}
+
+// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+inline at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+    return at::_ops::add_Scalar::call(self, other, alpha);
+}
+
+// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+    return at::_ops::add_Scalar_out::call(self, other, alpha, out);
+}
+// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & add_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::add_Scalar_out::call(self, other, alpha, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1b3c8e5b72945c9ed2e4f4fd10155f81dec7e82
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1bede8da2134ee04fbf12b4ed5143a9726503a02
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor argmin(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & argmin_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & argmin_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8cccc5161ee51d88e2db99dca34ff66462da036f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_outf(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b435cbfd512be92deabaa6e7c714a1dca95f7810
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API binary_cross_entropy_with_logits {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_with_logits")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction);
+};
+
+struct TORCH_API binary_cross_entropy_with_logits_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_with_logits")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..97f3942329d44568fc5a3b9ed43ed61fbeb7573e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor bitwise_not(const at::Tensor & self);
+TORCH_API at::Tensor & bitwise_not_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & bitwise_not_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_not_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..63f3cb00bfa9ba7188c7c2b6cf1a0d170b7036a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor cosh(const at::Tensor & self);
+TORCH_API at::Tensor & cosh_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_embedding_loss_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_embedding_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..51f2da4418505e61128802642a05415b785a85b3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_embedding_loss_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..55bfe1d981b1d276686d0d19be784437cc0c9458
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & cudnn_convolution_transpose_out_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out);
+TORCH_API at::Tensor cudnn_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6846fbf7213f906f92e013fccd278e02d20408c8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & empty_quantized_out(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+TORCH_API at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee920d04ed4f4443863897fb7b48bc4d8792a0cb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_meta_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor eq(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & eq_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor eq(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & eq_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/equal_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/equal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..74af76ef345ce977a9533dddf97d32ff10e8266e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/equal_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API equal { + using schema = bool (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::equal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "equal(Tensor self, Tensor other) -> bool") + static bool call(const at::Tensor & self, const at::Tensor & other); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..468e4e5fac8da3f019a6ae52c5bb480692de0df4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor erfc(const at::Tensor & self); +TORCH_API at::Tensor & erfc_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & erfc_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & erfc_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..0fb15f44bc794f0c59bced73b98ff5b967ac35da --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_exp : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eye_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eye_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1524e9b37caeadfe3d165e83ff6e4d4d6432c549 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eye_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor eye(int64_t n, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional 
pin_memory={}); +TORCH_API at::Tensor & eye_out_cpu(int64_t n, at::Tensor & out); +TORCH_API at::Tensor & eye_out_cuda(int64_t n, at::Tensor & out); +TORCH_API at::Tensor eye(int64_t n, int64_t m, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +TORCH_API at::Tensor & eye_out_cpu(int64_t n, int64_t m, at::Tensor & out); +TORCH_API at::Tensor & eye_out_cuda(int64_t n, int64_t m, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ebad60dec6402dc014e118e83432eb34582205be --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_ifft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_ifft_symint_out(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5b4dc96b6c1a7f4697b74304f3e4579aa7c2a9c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_irfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_irfft2_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..73aa1ed5a954297106af6832677d047638b5b990 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
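// Editor's usage sketch (not part of the generated headers above): a minimal
// caller for the eye and fft_ifft entry points just declared, assuming a
// standard libtorch build linked with -ltorch; `demo_eye_ifft` is an
// illustrative name, not an ATen symbol.
#include <ATen/ATen.h>

static void demo_eye_ifft() {
  at::Tensor id3 = at::eye(3);        // 3x3 identity
  at::Tensor rect = at::eye(2, 4);    // 2x4, ones on the main diagonal

  // fft_ifft inverts fft_fft along the last dimension (dim=-1 by default).
  at::Tensor x = at::randn({8}, at::kComplexFloat);
  at::Tensor roundtrip = at::fft_ifft(at::fft_fft(x));
  AT_ASSERT(at::allclose(roundtrip, x, 1e-4, 1e-5));
}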
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..874077418e01e2b8d38a4823e6d2f03feefffdd1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & grid_sampler_3d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); +TORCH_API at::Tensor grid_sampler_3d_cpu(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +TORCH_API at::Tensor grid_sampler_3d_cuda(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0f29b0293645726c227b2264e4eaf866a1e179a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardswish(const at::Tensor & self); +TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardswish_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h new file mode 100644 index 0000000000000000000000000000000000000000..764c572659fac897a22fc6622b873ea006f9d14d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} + +// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor +inline at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh::call(self, min_val, max_val); +} + +// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) +inline at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_::call(self, min_val, max_val); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h new file mode 100644 index 0000000000000000000000000000000000000000..bab54186b8a3ced89328464f8d800ba5e3d7303b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} + +// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +inline at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e981e5b71aac0bb01137f06c8d5e8c0a7dd33ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_pinned(const at::Tensor & self, c10::optional device=c10::nullopt); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..67645c74f872ed505d143f35e6b67929eb133236 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
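// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build; `unfold_patches` and `demo_hardtanh` are
// illustrative names. im2col unfolds sliding local blocks: a (N, C, H, W)
// input with a 3x3 kernel becomes (N, C*3*3, L), L being the block count.
#include <ATen/ATen.h>

static at::Tensor unfold_patches(const at::Tensor& input) {
  return at::im2col(input, /*kernel_size=*/{3, 3}, /*dilation=*/{1, 1},
                    /*padding=*/{0, 0}, /*stride=*/{1, 1});
}
// e.g. unfold_patches(at::randn({1, 2, 5, 5})) has shape (1, 18, 9).

static void demo_hardtanh() {
  at::Tensor x = at::randn({5}) * 3;
  at::Tensor clipped = at::hardtanh(x);            // clamps to [-1, 1]
  at::Tensor wider = at::hardtanh(x, -2.0, 2.0);   // custom min_val/max_val
}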
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_vulkan_available(); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf.h new file mode 100644 index 0000000000000000000000000000000000000000..b5cef9a951653312a7132a301bc3c1c739b3cb54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isposinf(Tensor self) -> Tensor +inline at::Tensor isposinf(const at::Tensor & self) { + return at::_ops::isposinf::call(self); +} + +// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isposinf_out::call(self, out); +} +// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isposinf_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3e7152e3146bdfa4350304d5acf36e1fc50b66da --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8078540fd9c25c3778b4e63ac557775b2a16fd56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
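// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build; `demo_isposinf_kldiv` is an illustrative name.
#include <ATen/ATen.h>

static void demo_isposinf_kldiv() {
  // -log(arange(3)) = [+inf, 0, -0.693...]; isposinf flags only +inf entries.
  at::Tensor t = -at::log(at::arange(3).to(at::kFloat));
  at::Tensor mask = at::isposinf(t);   // [true, false, false]

  // kl_div expects the input in log-space; reduction defaults to Mean.
  at::Tensor log_p = at::log_softmax(at::randn({4, 5}), /*dim=*/1);
  at::Tensor q = at::softmax(at::randn({4, 5}), /*dim=*/1);
  at::Tensor loss = at::kl_div(log_p, q, at::Reduction::Mean, /*log_target=*/false);
}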
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0cbd5eeeda1c426ab17da1833e0eaa768604858a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_lu_solve { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lu_solve") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor") + static at::Tensor call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint); +}; + +struct TORCH_API linalg_lu_solve_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lu_solve") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f1d241106625fdb5e189b90f751d24f13ca30263 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out); +TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a1165d7d92872c593482ad21e3e2ddbef3d13e7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3741e1fcc10dc6bae759a6ddc1e950c2f79a7123 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple max(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self); +TORCH_API at::Tensor & max_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum.h new file mode 100644 index 0000000000000000000000000000000000000000..d4f7ef80bf80fbec22a0cc4528d9dc7d996868c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::maximum(Tensor self, Tensor other) -> Tensor +inline at::Tensor maximum(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::maximum::call(self, other); +} + +// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::maximum_out::call(self, other, out); +} +// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::maximum_out::call(self, other, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..76f8045fc928132c6e03d2b045102282ae8c8486 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API polygamma_out { + using schema = at::Tensor & (int64_t, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::polygamma") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(int64_t n, const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API polygamma { + using schema = at::Tensor (int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::polygamma") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "polygamma(int n, Tensor self) -> Tensor") + static at::Tensor call(int64_t n, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self); +}; + +struct TORCH_API polygamma_ { + using schema = at::Tensor & (at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::polygamma_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "polygamma_(Tensor(a!) 
self, int n) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t n); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/put_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/put_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9b438ada9b48aab21d35c3146568fd834b5a017c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/put_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor put(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false); +TORCH_API at::Tensor & put_out(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out); +TORCH_API at::Tensor & put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_zero_point_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_zero_point_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3b77ce2b68ecc5584993895556117b5abef957c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_zero_point_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API q_zero_point { + using schema = int64_t (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::q_zero_point") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "q_zero_point(Tensor self) -> int") + static int64_t call(const at::Tensor & self); + static int64_t redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..382ce43161562695db7a43e6e9f023e94df95c72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
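// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build; `demo_polygamma_maximum` is an illustrative name.
#include <ATen/ATen.h>

static void demo_polygamma_maximum() {
  // polygamma(n, x) is the n-th derivative of digamma; n=0 equals digamma(x).
  at::Tensor x = at::rand({3}) + 0.5;
  at::Tensor trigamma = at::polygamma(1, x);
  x.polygamma_(2);   // in-place variant matching the polygamma_ schema above

  // maximum (declared earlier in this diff) is the elementwise binary max.
  at::Tensor m = at::maximum(at::randn({3}), at::randn({3}));
}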
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ravel.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ravel.h new file mode 100644 index 0000000000000000000000000000000000000000..364f3fcf425a04bffa01c2a90f37ffdf7909d0b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ravel.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ravel(Tensor(a) self) -> Tensor(a) +inline at::Tensor ravel(const at::Tensor & self) { + return at::_ops::ravel::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f2e65de4a991db4c1132b8c85307311a0a013f1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
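// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build with the CPU quantization backend; `demo_quantize_ravel`
// is an illustrative name.
#include <ATen/ATen.h>

static void demo_quantize_ravel() {
  at::Tensor w = at::randn({2, 3});
  // One (scale, zero_point) pair per slice along axis 0; scales must be
  // double and zero_points int64.
  at::Tensor scales = (at::rand({2}) * 0.1 + 0.01).to(at::kDouble);
  at::Tensor zps = at::zeros({2}, at::kLong);
  at::Tensor qw = at::quantize_per_channel(w, scales, zps, /*axis=*/0, at::kQInt8);

  at::Tensor flat = at::ravel(w);   // 1-D result, a view when possible
}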
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a7e30d1eda0df1983ecf8f6962706f11b7c8570a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_replication_pad1d_backward : public at::impl::MetaBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef padding); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3ccb0fa3c384c74917708eeb05a75e789f6f22e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsqrt_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor rsqrt(const at::Tensor & self); +TORCH_API at::Tensor & rsqrt_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & rsqrt_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & rsqrt_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e9fb57caec623d0e9cf84d81b74a2c54314e9bff --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
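// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build; shows the functional / out= / in-place trio that
// the rsqrt declarations above expose. `demo_rsqrt` is an illustrative name.
#include <ATen/ATen.h>

static void demo_rsqrt() {
  at::Tensor x = at::rand({4}) + 1.0;
  at::Tensor r = at::rsqrt(x);       // 1 / sqrt(x), elementwise
  at::Tensor out = at::empty_like(x);
  at::rsqrt_out(out, x);             // writes into a preallocated tensor
  x.rsqrt_();                        // mutates x in place
}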
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API searchsorted_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::searchsorted") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor") + static at::Tensor call(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); +}; + +struct TORCH_API searchsorted_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional, const c10::optional &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::searchsorted") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, at::Tensor & out); +}; + +struct TORCH_API searchsorted_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool, c10::optional, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::searchsorted") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor") + static at::Tensor call(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); +}; + +struct TORCH_API searchsorted_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, c10::optional, const c10::optional &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::searchsorted") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..e290400adfda920d65a7dce32b06700144766610 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & silu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::silu_backward_grad_input::call(grad_output, self, grad_input); +} +// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & silu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { + return at::_ops::silu_backward_grad_input::call(grad_output, self, grad_input); +} + +// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor +inline at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::silu_backward::call(grad_output, self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8c6fdde84b89dcb823d2780e454327577281a142 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0); +TORCH_API at::Tensor & smooth_l1_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0); +TORCH_API at::Tensor & smooth_l1_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..10c9ba1890176a1bef91d339012260a5628d1c46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
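// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build; `demo_smooth_l1` is an illustrative name.
#include <ATen/ATen.h>

static void demo_smooth_l1() {
  at::Tensor pred = at::randn({8});
  at::Tensor target = at::randn({8});
  // Quadratic where |pred - target| < beta, linear beyond; Mean reduction
  // is the default, matching the declarations above.
  at::Tensor mean_loss = at::smooth_l1_loss(pred, target);
  at::Tensor per_elem = at::smooth_l1_loss(pred, target, at::Reduction::None,
                                           /*beta=*/0.5);
}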
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_airy_ai(const at::Tensor & x); +TORCH_API at::Tensor & special_airy_ai_out(at::Tensor & out, const at::Tensor & x); +TORCH_API at::Tensor & special_airy_ai_outf(const at::Tensor & x, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e852b8fc352d1507d82691f818b4a032895567b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_bessel_y1 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_y1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_y1(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_bessel_y1_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_y1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7fb626c20d23c09626e04750994bd40d815b4ea2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
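// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build; the special_* kernels share the call / out= shape.
// `demo_special` is an illustrative name.
#include <ATen/ATen.h>

static void demo_special() {
  at::Tensor x = at::linspace(0.5, 5.0, 10);
  at::Tensor ai = at::special_airy_ai(x);      // Airy Ai(x)
  at::Tensor y1 = at::special_bessel_y1(x);    // Bessel y1(x), defined for x > 0
  at::Tensor out = at::empty_like(x);
  at::special_airy_ai_out(out, x);             // out= variant
}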
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_spherical_bessel_j0 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_spherical_bessel_j0") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_spherical_bessel_j0(Tensor x) -> Tensor") + static at::Tensor call(const at::Tensor & x); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x); +}; + +struct TORCH_API special_spherical_bessel_j0_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_spherical_bessel_j0") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & x, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8b7eb181051fc30d3e5e8745be9e1b718999e292 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
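// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build. squeeze_copy is the always-materializing
// counterpart of squeeze: it returns a fresh tensor rather than a view.
// `demo_squeeze_copy` is an illustrative name.
#include <ATen/ATen.h>

static void demo_squeeze_copy() {
  at::Tensor x = at::zeros({1, 3, 1});
  at::Tensor a = at::squeeze_copy(x);               // shape (3)
  at::Tensor b = at::squeeze_copy(x, /*dim=*/0);    // shape (3, 1)
  at::Tensor j0 = at::special_spherical_bessel_j0(at::rand({4}) + 0.1);
}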
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor squeeze_copy(const at::Tensor & self); +TORCH_API at::Tensor squeeze_copy(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor squeeze_copy(const at::Tensor & self, at::IntArrayRef dim); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/stft_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/stft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4e69782a2307cd91f493b358a7475c61e88a715f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/stft_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const c10::optional & window={}, bool normalized=false, c10::optional onesided=c10::nullopt, c10::optional return_complex=c10::nullopt); +TORCH_API at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const c10::optional & window={}, bool center=true, c10::string_view pad_mode="reflect", bool normalized=false, c10::optional onesided=c10::nullopt, c10::optional return_complex=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range.h new file mode 100644 index 0000000000000000000000000000000000000000..fad7ca9d57eea769d7ccec818c7d0b08118d1f1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sym_constrain_range(Scalar size, *, int? min=None, int? 
max=None) -> () +inline void sym_constrain_range(const at::Scalar & size, c10::optional min=c10::nullopt, c10::optional max=c10::nullopt) { + return at::_ops::sym_constrain_range::call(size, min, max); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset.h new file mode 100644 index 0000000000000000000000000000000000000000..2ce04dfaec1d7b22676e83d931c2c27386a3cac8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sym_storage_offset(Tensor self) -> SymInt +inline c10::SymInt __dispatch_sym_storage_offset(const at::Tensor & self) { + return at::_ops::sym_storage_offset::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b4c63fe18fbe439b7be67c05ff099dc96fd0846 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
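// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build; matches the first stft overload declared earlier
// in this diff (the one without center/pad_mode). Recent releases expect
// return_complex to be set explicitly. `demo_stft` is an illustrative name.
#include <ATen/ATen.h>

static void demo_stft() {
  at::Tensor sig = at::randn({1, 16000});
  at::Tensor win = at::hann_window(400);
  at::Tensor spec = at::stft(sig, /*n_fft=*/400, /*hop_length=*/160,
                             /*win_length=*/400, win, /*normalized=*/false,
                             /*onesided=*/true, /*return_complex=*/true);
  // spec: (1, 201, n_frames) complex tensor; 201 = n_fft/2 + 1 onesided bins.
}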
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor t_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_padded_tensor_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_padded_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9a3092ae8356d2432db503e2f00d9602db117113 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_padded_tensor_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & to_padded_tensor_out_symint(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor NestedTensor_to_padded_tensor_generic(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt); +TORCH_API at::Tensor NestedTensor_to_padded_tensor_cuda(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..287d5d336a5a40a60bb1cf54a0174cd2c3296b39 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor triu(const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & triu_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & triu_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out); +TORCH_API at::Tensor & triu_(at::Tensor & self, int64_t diagonal=0); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c36aad0d9191c31e729785b461ceb6e2293b7774 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor trunc(const at::Tensor & self); +TORCH_API at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & trunc_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zero_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zero_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..df855179aadf1a32590043648366061b905b3699 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zero_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API zero_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::zero_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "zero_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API zero_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::zero") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API zero { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::zero") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "zero(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops
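// Editor's usage sketch (not part of the generated headers above), assuming a
// standard libtorch build recent enough to expose the functional at::zero;
// `demo_zero_searchsorted` is an illustrative name.
#include <ATen/ATen.h>

static void demo_zero_searchsorted() {
  at::Tensor t = at::ones({2, 2});
  t.zero_();                                    // all elements become 0 in place
  at::Tensor z = at::zero(at::ones({2, 2}));    // functional form, input untouched

  // searchsorted (declared earlier in this diff) finds insertion points in a
  // sorted sequence; the Scalar overload avoids building a 0-dim tensor.
  at::Tensor seq = at::arange(0, 10).to(at::kFloat);
  at::Tensor pos = at::searchsorted(seq, 3.5);  // -> 4
}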