diff --git a/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f657caf4f890991ac65fce40dc4986245c634491
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7368f25a7b878753f55774862db32d991227597d61124a8ddde4fa45dc108283
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..11d3cfa724a0eb0315e2e9870e4085dd93f13377
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9198054c070761a5a942b87a85238431556787a72aac455b56907d36514ce8f0
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..566da6d10ed61dd3ba220b444ccf7a49cfc85b4c
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:766bec39e5a7812df879534d50a361d9db3d64fc2d21c5655624a8318a042d21
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/4.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/4.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8d7c5c76395ee82169a0352b76b0f6382a097a1a
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/4.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:675865555dac4a1e9d000c54fa5114da9028ae071bf6c0041fc278e490480b67
+size 9293
diff --git a/ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..da17643d63bf5e7d0fb32e25dc5ae280b31d0817
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22feb26e680a223765b177bd293c6662ea7bc96a52ff3d2e741496e88afebeed
+size 33555533
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..93c39445fbfedc557d3f1d09bf801804a7184872
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a55f4c0c9c01b5890a78123a8ca2d87674983e50
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/_addmm_activation_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_addmm_activation_out_cpu : public at::meta::structured__addmm_activation {
+void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, const at::Tensor & out);
+};
+struct TORCH_API structured_addmm_activation_out_cuda : public at::meta::structured__addmm_activation {
+void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_aminmax_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_aminmax_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b2760bff6b9b44a045ee857fd07d1bd526ed6cc9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_aminmax_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _aminmax_all(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_dim_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self, int64_t dim, bool keepdim=false);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_forward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_forward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d26c0af3aa91ff7049501deb6d52691d9e537c92
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_forward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _cdist_forward_out(at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode);
+TORCH_API at::Tensor & _cdist_forward_outf(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..10de8c1711d10b80ed972785e29c58ad77270584
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _ctc_loss_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_ctc_loss_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity);
+};
+
+struct TORCH_API _ctc_loss_backward_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_ctc_loss_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity);
+};
+
+struct TORCH_API _ctc_loss_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_ctc_loss_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b92fd940a35bfe6b1e44c0f300224bdd3661b1f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs.h
new file mode 100644
index 0000000000000000000000000000000000000000..d32a492b20587ec24a1b3021c6def522ec24449b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_abs_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_abs(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_abs(at::TensorList self) {
+    return at::_ops::_foreach_abs::call(self);
+}
+
+// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
+inline void _foreach_abs_(at::TensorList self) {
+    return at::_ops::_foreach_abs_::call(self);
+}
+
+// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_abs_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_abs_out::call(self, out);
+}
+// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_abs_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_abs_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2ac11bb174e43b743d043ea3927c56a8f963f4d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_cos_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_cos_slow(at::TensorList self);
+TORCH_API void foreach_tensor_cos_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_cos_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_cos_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a02bf4d210c4f769a948b751f39d6b16bbb26ab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_native.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_minimum_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_max_scalar_kernel_slow(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void foreach_tensor_clamp_max_scalar_kernel_slow_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_max_scalar_kernel_cuda(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void foreach_tensor_clamp_max_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_minimum_List_out(at::TensorList self, at::TensorList other, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_max_list_kernel_slow(at::TensorList self, at::TensorList other);
+TORCH_API void foreach_tensor_clamp_max_list_kernel_slow_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_max_list_kernel_cuda(at::TensorList self, at::TensorList other);
+TORCH_API void foreach_tensor_clamp_max_list_kernel_cuda_(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_minimum_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_max_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_clamp_max_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_max_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_clamp_max_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9e088db3e3e5886f2ade39caa41abd051174502c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_sinh {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_sinh_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_sinh_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e76048965ddab344ad9285f932ca154881df83e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_trunc(at::TensorList self);
+TORCH_API void _foreach_trunc_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h
new file mode 100644
index 0000000000000000000000000000000000000000..09a067936d524f0a6fdbd4263c0e935fb346c560
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h
@@ -0,0 +1,63 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fused_adam_ops.h>
+
+namespace at {
+
+
+// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+
+// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+
+// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb3590051f05066ad5758deffc93bbef31fef76a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool _is_zerotensor(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8d7b091eb044d7b4805a1d4317044dbe734bf666
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _mkldnn_reshape {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_reshape")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_reshape(Tensor self, int[] shape) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef shape);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape);
+};
+
+struct TORCH_API _mkldnn_reshape_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_reshape")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..d09e8a602ac74a0eeb74fc962efedb95b89b7304
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_neg_view_copy_ops.h>
+
+namespace at {
+
+
+// aten::_neg_view_copy(Tensor self) -> Tensor
+inline at::Tensor _neg_view_copy(const at::Tensor & self) {
+    return at::_ops::_neg_view_copy::call(self);
+}
+
+// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _neg_view_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::_neg_view_copy_out::call(self, out);
+}
+// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _neg_view_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_neg_view_copy_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..716da905e0c1d7f29f9416237ab040cf2979f911
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _nested_sum_backward_cpu(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_forward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_forward.h
new file mode 100644
index 0000000000000000000000000000000000000000..c7f6c94f164792adcf506b9b428e24177bfa09e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_forward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_pdist_forward_ops.h>
+
+namespace at {
+
+
+// aten::_pdist_forward(Tensor self, float p=2) -> Tensor
+inline at::Tensor _pdist_forward(const at::Tensor & self, double p=2) {
+    return at::_ops::_pdist_forward::call(self, p);
+}
+
+// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _pdist_forward_out(at::Tensor & out, const at::Tensor & self, double p=2) {
+    return at::_ops::_pdist_forward_out::call(self, p, out);
+}
+// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _pdist_forward_outf(const at::Tensor & self, double p, at::Tensor & out) {
+    return at::_ops::_pdist_forward_out::call(self, p, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet.h
new file mode 100644
index 0000000000000000000000000000000000000000..a912e8945ad3bb8b4770a79da7c0b15ee507b479
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sample_dirichlet_ops.h>
+
+namespace at {
+
+
+// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
+inline at::Tensor _sample_dirichlet(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::_sample_dirichlet::call(self, generator);
+}
+
+// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sample_dirichlet_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::_sample_dirichlet_out::call(self, generator, out);
+}
+// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sample_dirichlet_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::_sample_dirichlet_out::call(self, generator, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8fe13e9be8a8a7fd454c42e9086be6ab256daaa7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_sparse_matmul {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_sparse_matmul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API _sparse_sparse_matmul_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_sparse_matmul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa7afd5472d9ccfaba42c9a733eed3c5d96f49c4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self, bool b);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b50c4004cb24d0cc17e04e608bb1d6438441fab5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _transformer_encoder_layer_fwd_out(at::Tensor & out, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, c10::optional<int64_t> mask_type=c10::nullopt);
+TORCH_API at::Tensor & _transformer_encoder_layer_fwd_outf(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e139801693bb40534468e6913afea195aa40b8fd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
+TORCH_API at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae2c9c5e64b971b9127cb25b99bb4e1b62ae142c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__upsample_nearest_exact1d_backward : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, c10::optional<double> scales);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c938c24c6076493b391af4028cf5ed7f6ddce976
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b707f93a43a82622e0204acccded9aaca39d5e62
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim=0);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..57c0d6e74969cdbd57373b088e9dd2fcc9b22354
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+TORCH_API at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+TORCH_API at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/any_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/any_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5f9e3bd67b810def4f3338d9e8b2841760ce740
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/any_meta.h
@@ -0,0 +1,37 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_any_dim : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, int64_t dim, bool keepdim);
+};
+struct TORCH_API structured_any_dims : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim);
+};
+struct TORCH_API structured_any : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..37b1a6ea327e4cff03bf29f93aa35368bc32f195
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & as_strided_copy_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out);
+TORCH_API at::Tensor as_strided_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_reduce_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_reduce_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f1ed3db998c1a7ca5b31ae5423832fcf154e761
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_reduce_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..667acb87896549237393843f7373d202b8ba045d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc2b899f572ad76950d09fe9b8990e14f903c34e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_xor_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cross_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cross_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6ebd8716872ab89b0909acf29c4adafe5f75ed0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cross_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API cross_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cross") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, c10::optional dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional dim, at::Tensor & out); +}; + +struct TORCH_API cross { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cross") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cross(Tensor self, Tensor other, int? dim=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, c10::optional dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional dim); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..da2fb1d76600330e21d3707a1f33932c8ec07dd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
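+// Usage sketch (not part of the generated header): the meta-dispatch entries
+// below only infer shapes/dtypes; the public op is callable as, e.g.
+//   at::Tensor x = at::randn({4});
+//   at::Tensor y = at::elu(x);  // defaults: alpha=1, scale=1, input_scale=1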
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor elu(const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1); +TORCH_API at::Tensor & elu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1); +TORCH_API at::Tensor & elu_outf(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out); +TORCH_API at::Tensor & elu_(at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7af0ff4477aa3ae9e08d0b200115fa6217872ee7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_as_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_as_native.h new file mode 100644 index 0000000000000000000000000000000000000000..89e6fa63619746d28a3b513866c501ab3b34038d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_as_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor expand_as(const at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight.h new file mode 100644 index 0000000000000000000000000000000000000000..f9ccacf6ea7c179c5776496c14d1b17390337b0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor +inline at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, 
const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) { + return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bd5d7e93fdf0e2c179c04ff93d80f88d33b691c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & fill_diagonal_(at::Tensor & self, const at::Scalar & fill_value, bool wrap=false); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip.h new file mode 100644 index 0000000000000000000000000000000000000000..ccd5f1cfb2f526088c980fe54ffe94a1d35305aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::flip(Tensor self, int[] dims) -> Tensor +inline at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::flip::call(self, dims); +} + +// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & flip_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::flip_out::call(self, dims, out); +} +// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & flip_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::flip_out::call(self, dims, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..714e2139cebc7f008273ffb499e6cc0aa12439d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple geqrf(const at::Tensor & self); +TORCH_API ::std::tuple geqrf_out(const at::Tensor & self, at::Tensor & a, at::Tensor & tau); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h new file mode 100644 index 0000000000000000000000000000000000000000..2b64824d76ebf22dc922f7c20809a5d55051edae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::i0(Tensor self) -> Tensor +inline at::Tensor i0(const at::Tensor & self) { + return at::_ops::i0::call(self); +} + +// aten::i0_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & i0_(at::Tensor & self) { + return at::_ops::i0_::call(self); +} + +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::i0_out::call(self, out); +} +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::i0_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e7ee7ff1358cd217a41ffcbcd86f24bd22f961c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
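+// Usage sketch (not part of the generated header): this schema backs
+// Tensor::is_signed(), e.g.
+//   at::tensor({1.0f}).is_signed()          // true: float is signed
+//   at::tensor({1}, at::kByte).is_signed()  // false: uint8 is unsigned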
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_signed { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_signed") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_signed(Tensor self) -> bool") + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..320ab4dc80b6ab137ecbd1d394857acea4ce8921 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API le_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::le") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API le_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::le") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "le.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API le_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::le") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API le_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::le") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "le.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API le__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::le_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API le__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::le_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3adc307190db636f1019c168d0c65b244780feca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..79be6ba3e2f32dc84ffbfa214fe7736e6a4c9ed7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false); +TORCH_API at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false); +TORCH_API at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a8c389e1448a558207f9016df2165a7c17212ed2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
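+// Usage sketch (not part of the generated header): log_normal_ fills a tensor
+// in place with samples whose logarithm is distributed N(mean, std^2), e.g.
+//   at::Tensor t = at::empty({3});
+//   t.log_normal_(/*mean=*/0.0, /*std=*/1.0);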
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & log_normal_(at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2bcee47190d39b3bf61666cf4f0b2a9451c6336
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/lstm_ops.h>
+
+namespace at {
+
+
+// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+    return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+}
+
+// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+    return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1fa15f464916f595e51672dae099617e8501b511
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API margin_ranking_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::margin_ranking_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor") + static at::Tensor call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..614b019b5e59d73f4536bd9b4a95099a13138282 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); + } +} + +// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple mps_convolution_transpose_backward_symint(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return 
at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple mps_convolution_transpose_backward_symint_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple mps_convolution_transpose_backward_symint_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..a1c594c5ca07efae46db49d085b1831448ce2319 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::mse_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input); +} +// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & mse_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) { + return at::_ops::mse_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input); +} + +// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor +inline at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multiply.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multiply.h new file mode 100644 index 0000000000000000000000000000000000000000..fa2d655c88b7b03545cc2e712ac90be7deb85062 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multiply.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor multiply(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::multiply_Tensor::call(self, other); +} + +// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & multiply_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::multiply_out::call(self, other, out); +} +// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & multiply_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::multiply_out::call(self, other, out); +} + +// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor multiply(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::multiply_Scalar::call(self, other); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..2a00c78cb0f63a3f09ce0ea6c71dbb8589b2086d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle.h @@ -0,0 +1,47 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor +inline at::Tensor native_channel_shuffle(const at::Tensor & self, int64_t groups) { + return at::_ops::native_channel_shuffle::call(self, groups); +} +namespace symint { + template ::value>> + at::Tensor native_channel_shuffle(const at::Tensor & self, int64_t groups) { + return at::_ops::native_channel_shuffle::call(self, groups); + } +} + +// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor +inline at::Tensor native_channel_shuffle_symint(const at::Tensor & self, c10::SymInt groups) { + return at::_ops::native_channel_shuffle::call(self, groups); +} +namespace symint { + template ::value>> + at::Tensor native_channel_shuffle(const at::Tensor & self, c10::SymInt groups) { + return 
at::_ops::native_channel_shuffle::call(self, groups);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a38921e52e17cda7b4745e3c209388b4a79b3b8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & new_zeros_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb5d0d1487c0babb782167f6ab08a691eac1e875
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c674af8a2adf0ffec8d5d028834e12824b3b6da0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
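+// Usage sketch (not part of the generated header): dims lists the new order of
+// the axes, e.g.
+//   at::Tensor x = at::rand({2, 3, 5});
+//   at::Tensor y = at::permute(x, {2, 0, 1});  // y.sizes() == {5, 2, 3}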
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polar_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polar_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..91c660afadeaf1ee14cdc9089276ea35223c6e0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polar_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & polar_out(at::Tensor & out, const at::Tensor & abs, const at::Tensor & angle); +TORCH_API at::Tensor & polar_outf(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8619284282355d401b9e3b44a6e940bca8f19d3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API range_step { + using schema = at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::range") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "step") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor") + static at::Tensor call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API range { + using schema = at::Tensor (const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::range") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(const at::Scalar & start, const at::Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API range_out_ { + using schema = at::Tensor & (const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::range") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & start, const at::Scalar & end, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::Tensor & out); +}; + +struct TORCH_API range_out { + using schema = at::Tensor & (const at::Scalar &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::range") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..00228f75958c9f5c739e52fbb7b91d4c15a9769e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & reflection_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5d24a8093e936026b59bf58ba2979a680f95baaa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_reflection_pad3d_backward_out_cpu : public at::meta::structured_reflection_pad3d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef padding, const at::Tensor & grad_input); +}; +struct TORCH_API structured_reflection_pad3d_backward_out_cuda : public at::meta::structured_reflection_pad3d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef padding, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad_compositeimplicitautograd_dispatch.h new file mode 100644 index 
0000000000000000000000000000000000000000..50a6aafd48f3e2814b60b16ebecab6a273772b3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor & requires_grad_(at::Tensor & self, bool requires_grad=true); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/round_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/round_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..86a75239266fc6bda26a08a6afce47f0b5cfa22d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/round_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor round(const at::Tensor & self); +TORCH_API at::Tensor & round_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & round_(at::Tensor & self); +TORCH_API at::Tensor round(const at::Tensor & self, int64_t decimals); +TORCH_API at::Tensor & round_out(at::Tensor & out, const at::Tensor & self, int64_t decimals); +TORCH_API at::Tensor & round_outf(const at::Tensor & self, int64_t decimals, at::Tensor & out); +TORCH_API at::Tensor & round_(at::Tensor & self, int64_t decimals); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0d10ef2c21026d9d8af13a9a4f700d0529767ca3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
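+// Usage sketch (not part of the generated header): randomized leaky ReLU; in
+// training mode the negative slope is drawn uniformly from [lower, upper]:
+//   at::Tensor x = at::randn({4});
+//   at::Tensor y = at::rrelu(x, /*lower=*/0.125, /*upper=*/1.0/3.0, /*training=*/true);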
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API rrelu {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rrelu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator);
+};
+
+struct TORCH_API rrelu_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rrelu_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1ea09f5b7ab748eaf490155a5e0dc97fc8e2e68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1ea09f5b7ab748eaf490155a5e0dc97fc8e2e68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor sign(const at::Tensor & self);
+TORCH_API at::Tensor & sign_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..58a4faae53d0da60a7f053763381a8cb520c7418
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward.h
new file mode 100644
index 0000000000000000000000000000000000000000..60cf09b121f1102a6a5731ea962b0f1ee72be358
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/slow_conv3d_forward_ops.h>
+
+namespace at {
+
+
+// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+inline at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
+    return at::_ops::slow_conv3d_forward_output::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
+    return at::_ops::slow_conv3d_forward_output::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output);
+  }
+}
+
+// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+inline at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
+    return at::_ops::slow_conv3d_forward_output::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
+    return at::_ops::slow_conv3d_forward_output::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output);
+  }
+}
+
+// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+inline at::Tensor & slow_conv3d_forward_symint_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+    return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+    return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
+  }
+}
+
+// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+inline at::Tensor & slow_conv3d_forward_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
+    return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
+    return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
+  }
+}
+
+// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
+inline at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
+    return at::_ops::slow_conv3d_forward::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
+    return at::_ops::slow_conv3d_forward::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding));
+  }
+}
+
+// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
+inline at::Tensor slow_conv3d_forward_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+    return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+    return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
+  }
+}
+
+}
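// Usage sketch for the slow_conv3d_forward wrappers above (a minimal,
// illustrative example, not part of the generated header; assumes <ATen/ATen.h>):
//
//   at::Tensor input  = at::randn({1, 2, 8, 8, 8});  // (N, C_in, D, H, W)
//   at::Tensor weight = at::randn({4, 2, 3, 3, 3});  // (C_out, C_in, kD, kH, kW)
//   at::Tensor bias   = at::zeros({4});
//   at::Tensor out = at::slow_conv3d_forward(input, weight, /*kernel_size=*/{3, 3, 3},
//                                            bias, /*stride=*/{1, 1, 1}, /*padding=*/{0, 0, 0});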
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3eb33af46206a3b4d12d2332a335fd95656e83d4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_ndtri(const at::Tensor & self);
+TORCH_API at::Tensor & special_ndtri_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_ndtri_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
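// Usage sketch for special_ndtri (illustrative, not part of the generated
// header; assumes <ATen/ATen.h>):
//
//   // ndtri is the inverse of the standard normal CDF, so ndtri(0.5) == 0.
//   at::Tensor p = at::tensor({0.025, 0.5, 0.975});
//   at::Tensor z = at::special_ndtri(p);  // approximately {-1.96, 0.0, 1.96}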
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf0450777a795e56925f4d0a3df3c21072055353
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API split_Tensor {
+  using schema = ::std::vector<at::Tensor> (const at::Tensor &, c10::SymInt, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::split")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]")
+  static ::std::vector<at::Tensor> call(const at::Tensor & self, c10::SymInt split_size, int64_t dim);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim);
+};
+
+struct TORCH_API split_sizes {
+  using schema = ::std::vector<at::Tensor> (const at::Tensor &, c10::SymIntArrayRef, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::split")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "sizes")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]")
+  static ::std::vector<at::Tensor> call(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim);
+};
+
+}} // namespace at::_ops
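// Usage sketch for the two split overloads above (illustrative, not part of
// the generated header; assumes <ATen/ATen.h>):
//
//   at::Tensor t = at::arange(10);
//   // split.Tensor: equal chunks of split_size (the last chunk may be smaller);
//   // the returned tensors are views of t.
//   std::vector<at::Tensor> chunks = at::split(t, /*split_size=*/4, /*dim=*/0);  // sizes 4, 4, 2
//   // split.sizes: explicit per-chunk sizes that must sum to t.size(dim).
//   std::vector<at::Tensor> parts = at::split(t, at::IntArrayRef{2, 3, 5}, /*dim=*/0);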
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..def5e5a6f7af49a8b3b3a4f2f05e9913d7a64238
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/t_copy_ops.h>
+
+namespace at {
+
+
+// aten::t_copy(Tensor self) -> Tensor
+inline at::Tensor t_copy(const at::Tensor & self) {
+    return at::_ops::t_copy::call(self);
+}
+
+// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & t_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::t_copy_out::call(self, out);
+}
+// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & t_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::t_copy_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a02a3b122d993eb9454259426c3fe070398c55ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/t_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor t(const at::Tensor & self);
+TORCH_API at::Tensor & t_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_along_dim_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_along_dim_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..25a0650ed30041a9f3f9ff98d5223f07c5d2b9eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_along_dim_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor take_along_dim(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim=c10::nullopt);
+TORCH_API at::Tensor & take_along_dim_out(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out);
+} // namespace native
+} // namespace at
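// Usage sketch for take_along_dim (illustrative, not part of the generated
// header; assumes <ATen/ATen.h>):
//
//   at::Tensor x = at::randn({2, 3});
//   at::Tensor idx = at::argsort(x, /*dim=*/1);
//   // Gathers x's values along dim 1 using idx, here yielding each row sorted.
//   at::Tensor sorted = at::take_along_dim(x, idx, /*dim=*/1);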
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a1d23e298a1571c122ee3400c39e54d17f55167
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor triu(const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & triu_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & triu_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+TORCH_API at::Tensor & triu_(at::Tensor & self, int64_t diagonal=0);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e40608a17924f371a08b7d97304c1d5fb65b6786
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_bicubic2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & upsample_bicubic2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
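// Usage sketch for the CPU upsample_bicubic2d entry points above
// (illustrative, not part of the generated header; assumes <ATen/ATen.h>):
//
//   at::Tensor img = at::randn({1, 3, 8, 8});  // (N, C, H, W)
//   at::Tensor big = at::upsample_bicubic2d(img, /*output_size=*/{16, 16},
//                                           /*align_corners=*/false);
//   // scales_h / scales_w default to c10::nullopt and are then derived
//   // from the input and output sizes.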
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e0820ec7a5203a41fe4802e5dda105490489d34
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_bilinear2d_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_upsample_bilinear2d_backward_out_cpu : public at::meta::structured_upsample_bilinear2d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<long> output_size, at::ArrayRef<long> input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_upsample_bilinear2d_backward_out_cuda : public at::meta::structured_upsample_bilinear2d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<long> output_size, at::ArrayRef<long> input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d620ae02220ec212d370b752a222f38ab412220a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_bilinear2d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_bilinear2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+};
+
+struct TORCH_API upsample_bilinear2d_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_bilinear2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
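// Usage sketch for upsample_bilinear2d_backward (illustrative, not part of
// the generated header; in practice autograd calls this rather than user code;
// assumes <ATen/ATen.h>):
//
//   at::Tensor grad_out = at::randn({1, 3, 16, 16});
//   at::Tensor grad_in = at::upsample_bilinear2d_backward(
//       grad_out, /*output_size=*/{16, 16}, /*input_size=*/{1, 3, 8, 8},
//       /*align_corners=*/false, /*scales_h=*/c10::nullopt, /*scales_w=*/c10::nullopt);
//   // grad_in has the forward input's shape: {1, 3, 8, 8}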
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..9857a527871c27b90366d30f9857dd061dc2fa8d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/upsample_trilinear3d_backward_ops.h>
+
+namespace at {
+
+
+// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w);
+  }
+}
+
+// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vdot_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vdot_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..323c083f5b5346aac90afbb5582c2c8f68aab0de
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vdot_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor vdot(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
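// Usage sketch for the CUDA vdot entry above (illustrative, not part of the
// generated header; assumes <ATen/ATen.h> and a CUDA build):
//
//   at::Tensor a = at::randn({4}, at::kComplexFloat).cuda();
//   at::Tensor b = at::randn({4}, at::kComplexFloat).cuda();
//   // vdot conjugates its first argument, unlike at::dot:
//   at::Tensor s = at::vdot(a, b);  // equals (a.conj() * b).sum()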