diff --git a/ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..663887c5f6808fb2296857b6da37a9c357976b0f --- /dev/null +++ b/ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36264fb59ed53d51065e39f6d151751cf7aa40d907cf1d28511a95d5421572d6 +size 33555533 diff --git a/ckpts/universal/global_step20/zero/24.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/24.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..34841803ce4cbf88e5af144bd49b4210303ad9e8 --- /dev/null +++ b/ckpts/universal/global_step20/zero/24.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3358c2a4dda43004c68a154c57f846b4228bc05e9aba64043bfd58aa7720a21c +size 9387 diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9a7db8a44d2501cceec25333fd1adc16226526d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _conj_physical { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_conj_physical") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_conj_physical(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API _conj_physical_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_conj_physical") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_conj_physical.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a37d413fe4524defa8e0aca19e705fb7f0ab8f15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor _convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2cf25c595a7ffdf209854ba78dcd1d57c74d20c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _cudnn_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state); +TORCH_API ::std::tuple _cudnn_rnn_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c51274a7ae94d01774f3997df525d8c9a5399f41 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _foreach_lerp_List { + using schema = ::std::vector (at::TensorList, at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]") + static ::std::vector call(at::TensorList self, at::TensorList tensors1, at::TensorList weights); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights); +}; + +struct TORCH_API _foreach_lerp__List { + using schema = void (at::TensorList, at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()") + static void call(at::TensorList self, at::TensorList tensors1, at::TensorList weights); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights); +}; + +struct TORCH_API _foreach_lerp_Scalar { + using schema = ::std::vector (at::TensorList, at::TensorList, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, 
"_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]") + static ::std::vector call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight); +}; + +struct TORCH_API _foreach_lerp__Scalar { + using schema = void (at::TensorList, at::TensorList, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()") + static void call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight); +}; + +struct TORCH_API _foreach_lerp_List_out { + using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out); +}; + +struct TORCH_API _foreach_lerp_Scalar_out { + using schema = void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..67e7c425bc0736bc51a96b0e61af703d63e2b4aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::vector _foreach_sin(at::TensorList self); +TORCH_API void _foreach_sin_(at::TensorList self); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9026efb35a303ce3d98c194a062cfdf9f10ea84d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _fused_adamw_ { + using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adamw_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()") + static void call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); +}; + +struct TORCH_API _fused_adamw__tensor_lr { + using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adamw_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> ()") + static void call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); +}; + +struct TORCH_API _fused_adamw_out { + using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adamw") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out); +}; + +struct TORCH_API _fused_adamw { + using schema = ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adamw") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)") + static ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); + static ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); +}; + +struct TORCH_API _fused_adamw_tensor_lr_out { + using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adamw") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out); +}; + +struct TORCH_API _fused_adamw_tensor_lr { + using schema = ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const c10::optional &, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adamw") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)") + static ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); + static ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_has_same_storage_numel_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_has_same_storage_numel_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c7044f641bcfc262b3395ee87e179b5137018955 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_has_same_storage_numel_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _has_same_storage_numel { + using schema = bool (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_has_same_storage_numel") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_has_same_storage_numel(Tensor self, Tensor other) -> bool") + static bool call(const at::Tensor & self, const at::Tensor & other); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..88e6f5463126569b1f2eee165bf3d6001d031592 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) +inline ::std::tuple _prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) { + return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f07826d1595e93b983ffe2ff77d6a0dac3345870 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple _prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b3d3acc88549936300923dee53ded8cd81acea0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5d8b15a18c9c63bdfa9032f9bdc3df55205fcfd3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _segment_reduce_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, int64_t, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_segment_reduce_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? 
initial=None) -> Tensor") + static at::Tensor call(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial); +}; + +struct TORCH_API _segment_reduce_backward_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const c10::optional &, const c10::optional &, int64_t, const c10::optional &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_segment_reduce_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe.h new file mode 100644 index 0000000000000000000000000000000000000000..f7dc573a514232f2ddf88874d8f7a6827afb2cd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe.h @@ -0,0 +1,69 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } +} + +// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm.h new file mode 100644 index 0000000000000000000000000000000000000000..bf3422e868395383616fd69ddc37f01f3470e4f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor +inline at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) { + return at::_ops::_sparse_mm::call(sparse, dense); +} + +// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor +inline at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) { + return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4cb993d70d4143aff90075579995dee130b16542 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _sparse_mm_reduce_impl_backward { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const at::Tensor &, ::std::array); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_mm_reduce_impl_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array output_mask); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array output_mask); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cfdee46ecfa73e7279e30184fa69b5b0b2b56d20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor _sparse_softmax(const at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & _sparse_softmax_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out); +TORCH_API at::Tensor softmax_sparse_cpu(const at::Tensor & self, int64_t dim, bool half_to_float); +TORCH_API at::Tensor softmax_sparse_cuda(const at::Tensor & self, int64_t dim, bool half_to_float); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1a5bd49bfad04b10d0badb985a7b673bdf50c847 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _standard_gamma_grad_out(const at::Tensor & self, const at::Tensor & output, at::Tensor & out); +TORCH_API at::Tensor _standard_gamma_grad_cpu(const at::Tensor & self, const at::Tensor & output); +TORCH_API at::Tensor _standard_gamma_grad_cuda(const at::Tensor & self, const at::Tensor & output); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_compositeimplicitautograd_dispatch.h 
b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a3de17f51a03492e8dcf55a178601944fec2a8e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple _thnn_differentiable_lstm_cell_backward(const c10::optional & grad_hy, const c10::optional & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional & input_bias, const c10::optional & hidden_bias, const at::Tensor & cx, const at::Tensor & cy); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..75c3ff97ef48782c016460d009b4386445c5cfd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); + } +} + +// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); + } +} + +// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input); + } +} + +// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input); + } +} + +// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? 
scales=None) -> Tensor +inline at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales); +} +namespace symint { + template ::value>> + at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales); + } +} + +// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor +inline at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales); +} +namespace symint { + template ::value>> + at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b659eb7774b4d603a85050e440fcbb3662cfd7bc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _validate_sparse_compressed_tensor_args { + using schema = void (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Layout); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_validate_sparse_compressed_tensor_args") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()") + static void call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmm.h new file mode 100644 index 0000000000000000000000000000000000000000..47bce97e74fcf515a5909c06ba2c1379bdec660c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmm_out::call(self, mat1, mat2, beta, alpha, out); +} +// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addmm_out::call(self, mat1, mat2, beta, alpha, out); +} + +// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmm::call(self, mat1, mat2, beta, alpha); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ac8b8896b16ce13ef163cdeb9c57dfa74638e365 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor amin(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false); +TORCH_API at::Tensor & amin_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false); +TORCH_API at::Tensor & amin_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1012f175e35d33ba3c05b2f7c0aa8bc6a5605940 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API aminmax { + using schema = ::std::tuple (const at::Tensor &, c10::optional, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::aminmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)") + static ::std::tuple call(const at::Tensor & self, c10::optional dim, bool keepdim); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim, bool keepdim); +}; + +struct TORCH_API aminmax_out { + using schema = ::std::tuple (const at::Tensor &, c10::optional, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::aminmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) 
max)") + static ::std::tuple call(const at::Tensor & self, c10::optional dim, bool keepdim, at::Tensor & min, at::Tensor & max); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim, bool keepdim, at::Tensor & min, at::Tensor & max); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..da31f81da8a4361e0da3b5c4746afd1f4bdbfc1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_atanh : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8b4cfc2cb06af40dbecbe19c8aa6cf1bc6c2a745 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cdist.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cdist.h new file mode 100644 index 0000000000000000000000000000000000000000..3e47bfcec353107cd45a179d699112361a48ea6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cdist.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::cdist(Tensor x1, Tensor x2, float p=2, int? 
compute_mode=None) -> Tensor +inline at::Tensor cdist(const at::Tensor & x1, const at::Tensor & x2, double p=2, c10::optional compute_mode=c10::nullopt) { + return at::_ops::cdist::call(x1, x2, p, compute_mode); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6dce56d7a063284b260be84593e773512f406aa6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_clamp_out : public at::meta::structured_clamp { +void impl(const at::Tensor & self, at::OptionalScalarRef min, at::OptionalScalarRef max, const at::Tensor & out); +}; +TORCH_API at::Tensor clamp_quantized_cpu(const at::Tensor & self, const c10::optional & min=c10::nullopt, const c10::optional & max=c10::nullopt); +struct TORCH_API structured_clamp_Tensor_out : public at::meta::structured_clamp_Tensor { +void impl(const at::Tensor & self, at::OptionalTensorRef min, at::OptionalTensorRef max, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin.h new file mode 100644 index 0000000000000000000000000000000000000000..6a4cc74ff220faae339d6b35ea234b0513f535e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummin.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) +inline ::std::tuple cummin(const at::Tensor & self, int64_t dim) { + return at::_ops::cummin::call(self, dim); +} + +// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) { + return at::_ops::cummin_out::call(self, dim, values, indices); +} +// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple cummin_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummin_out::call(self, dim, values, indices); +} + +// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) +inline ::std::tuple cummin(const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummin_dimname::call(self, dim); +} + +// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummin_dimname_out::call(self, dim, values, indices); +} +// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +inline ::std::tuple cummin_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummin_dimname_out::call(self, dim, values, indices); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ea2b1cda955de4d5e5798d1ca8748328603ed0cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor cumsum(const at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & cumsum_outf(const at::Tensor & self, at::Dimname dim, c10::optional dtype, at::Tensor & out); +TORCH_API at::Tensor & cumsum_(at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..27b20e4ebb0046bb4dbe189d212ad2c5099dd946 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor digamma(const at::Tensor & self); +TORCH_API at::Tensor & digamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & digamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & digamma_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine.h new file mode 100644 index 0000000000000000000000000000000000000000..d3aabb82c9a37db4669e2861e31407fd5532eed7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor +inline at::Tensor fake_quantize_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8b15c129ebe178ce5ed6e1a17b61e2b2aa75bada --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ff4abff062f07b79ffb494a6bbe740126a26e72b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gelu_backward_out_cpu : public at::meta::structured_gelu_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, const at::Tensor & grad_input); +}; +struct TORCH_API structured_gelu_backward_out_cuda : public at::meta::structured_gelu_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, const at::Tensor & grad_input); +}; +TORCH_API at::Tensor gelu_backwards_nested(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor mkldnn_gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..b3e63d53610c179e2cf61ed81accce26e1914372 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); +} + +// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple grid_sampler_3d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + return at::_ops::grid_sampler_3d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); +} +// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple grid_sampler_3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::grid_sampler_3d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/group_norm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/group_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2ca9137024842060accfd8f1e64d35928c92ac19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/group_norm_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API group_norm { + using schema = at::Tensor (const at::Tensor &, int64_t, const c10::optional &, const c10::optional &, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::group_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor") + static at::Tensor call(const at::Tensor & input, int64_t num_groups, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enabled); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t num_groups, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enabled); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..00c64aa87a9c6a290b8681053625ec3fa275d978 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardtanh_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardtanh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +}; + +struct TORCH_API hardtanh { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardtanh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +}; + +struct TORCH_API hardtanh_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardtanh_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardtanh_(Tensor(a!) 
self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h new file mode 100644 index 0000000000000000000000000000000000000000..71fbf4fa8a2ad04a91610a07eadd13c0d760da59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple histogram(const at::Tensor & self, const at::Tensor & bins, const c10::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_out(const at::Tensor & self, const at::Tensor & bins, const c10::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +TORCH_API ::std::tuple histogram(const at::Tensor & self, int64_t bins=100, c10::optional> range=c10::nullopt, const c10::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_out(const at::Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..905280801729e29a788ef68b44f829e4d6b7837b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hspmm_sparse_cpu(const at::Tensor & mat1, const at::Tensor & mat2); +TORCH_API at::Tensor & hspmm_out_sparse_cpu(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); +TORCH_API at::Tensor hspmm_sparse_cuda(const at::Tensor & mat1, const at::Tensor & mat2); +TORCH_API at::Tensor & hspmm_out_sparse_cuda(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8dbaa32cb2ee436b3f51b479dd9afdb3543388a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor imag(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index.h new file mode 100644 index 
0000000000000000000000000000000000000000..766921bf327db2202cebedf8659ca5b8e7135426 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor +inline at::Tensor index(const at::Tensor & self, const c10::List> & indices) { + return at::_ops::index_Tensor::call(self, indices); +} + +// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List> & indices) { + return at::_ops::index_Tensor_out::call(self, indices, out); +} +// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_outf(const at::Tensor & self, const c10::List> & indices, at::Tensor & out) { + return at::_ops::index_Tensor_out::call(self, indices, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..607b87da284ec80f75d0fea4c4a6cf59842a1e5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API indices { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "indices(Tensor(a) self) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose.h new file mode 100644 index 0000000000000000000000000000000000000000..612ec2f9c97523dd700035ceb685855efe8a9198 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor +inline at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) { + return at::_ops::isclose::call(self, other, rtol, atol, equal_nan); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..dacd2090a7b719dcd219edcc50d79d6da64857d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +inline at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::l1_loss::call(self, target, reduction); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h new file mode 100644 index 0000000000000000000000000000000000000000..239f63c6035e06bdd19aaede108c0204ee282dc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_matrix_exp(Tensor self) -> Tensor +inline at::Tensor linalg_matrix_exp(const at::Tensor & self) { + return at::_ops::linalg_matrix_exp::call(self); +} + +// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matrix_exp_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::linalg_matrix_exp_out::call(self, out); +} +// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::linalg_matrix_exp_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..53f8c0e006956115019d0782156b78cf6b0237d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple log_sigmoid_forward_cpu(const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_out_cpu(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +TORCH_API ::std::tuple log_sigmoid_forward_cuda(const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_out_cuda(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..aef1bb85631e27ff76d8d2b07a5d17450bd606b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor log_softmax(const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional dtype=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ae79a206c288214e56356ac2da0d0f6618b8a823 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API meshgrid { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::meshgrid") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "meshgrid(Tensor[] tensors) -> Tensor[]") + static ::std::vector call(at::TensorList tensors); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API meshgrid_indexing { + using schema = ::std::vector (at::TensorList, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::meshgrid") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "indexing") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]") + static ::std::vector call(at::TensorList tensors, c10::string_view indexing); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, c10::string_view indexing); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ef76020b4adf0980026a6b24370fbabb09709467 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple miopen_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..00a3c03378d6e5c451026005c857bda8422fc4a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +inline ::std::tuple mps_convolution_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); + } +} + +// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +inline ::std::tuple mps_convolution_backward_symint(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask); + } +} + +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple mps_convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1, out2); + } +} + +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple mps_convolution_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1, out2); + } +} + +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple mps_convolution_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2); + } +} + +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple mps_convolution_backward_symint_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..93b0f165b1e19d592b7e57bd841686cf8d562386 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction); +TORCH_API at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction); +TORCH_API at::Tensor & mse_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..50cd8d1307290074b934b7bd0d08da2259176db2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target); +TORCH_API at::Tensor & multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target); +TORCH_API at::Tensor & multilabel_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multiply_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multiply_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a7c3737e20c0daba70dd8f2375d55205c6bd7275 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multiply_ops.h @@ -0,0 +1,72 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API multiply_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::multiply") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "multiply.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API multiply__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::multiply_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API multiply_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::multiply") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API multiply_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::multiply") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "multiply.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API multiply__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::multiply_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "multiply_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2a82033344402c0cec0c4de7cfeb81c767812dcd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API new_full { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::new_full") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API new_full_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::new_full") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_batch_norm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_batch_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0a503ccde068093d546628d56bee3d9013908e9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_batch_norm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API quantized_batch_norm { + using schema = at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantized_batch_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor") + static at::Tensor call(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point); +}; + +struct TORCH_API quantized_batch_norm_out { + using schema = at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantized_batch_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b487d0689da26128e61450d4cd71c2db80aeda9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & random_(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt); +TORCH_API at::Tensor & random_(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt); +TORCH_API at::Tensor & random_(at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7148564fb6c461a5afa965a12c3944cf91f5c89b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & reflection_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c7edad53c3859f9ff33898fb85dea53dbdd0d47b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +TORCH_API at::Tensor & replication_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad3d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad3d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8278ea07582228854b2fe728a27877fe8c081db8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad3d_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API replication_pad3d_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::replication_pad3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) 
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); +}; + +struct TORCH_API replication_pad3d_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::replication_pad3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..67d22befeea9385580314a4ea11690db7e6ffe5d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor reshape_as(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rshift_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rshift_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4f5a210ac0178a96d8604d319960a7f4e2650215 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rshift_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor __rshift__(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __irshift__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor __rshift__(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __irshift__(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn.h new file mode 100644 index 0000000000000000000000000000000000000000..9365ebd7cce663cf20593a674a455c6e0008e848 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sgn(Tensor self) -> Tensor +inline at::Tensor sgn(const at::Tensor & self) { + return at::_ops::sgn::call(self); +} + +// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::sgn_out::call(self, out); +} +// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::sgn_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d.h new file mode 100644 index 0000000000000000000000000000000000000000..12caf6c96824a09480609d9c9f0bbcddf1f4b697 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor +inline at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation)); +} +namespace symint { + template ::value>> + at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation)); + } +} + +// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor +inline at::Tensor slow_conv_dilated2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation); +} +namespace symint { + template ::value>> + at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation); + } +} + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); + } +} + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); + } +} + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & slow_conv_dilated2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out); + } +} + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_dilated2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..43a443e3cdcf87bdb3728e7dfddfcaa5f94c96a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & soft_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::soft_margin_loss_out::call(self, target, reduction, out); +} +// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & soft_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) { + return at::_ops::soft_margin_loss_out::call(self, target, reduction, out); +} + +// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +inline at::Tensor soft_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::soft_margin_loss::call(self, target, reduction); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cd9e2631a9abd488377c53455833605c44eb26b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_log_softmax { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_log_softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..339220ef43f8ce2458dfd450628f57ae25c4e3f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_scaled_modified_bessel_k1 : public TensorIteratorBase { + + + void meta(const at::Tensor & x); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ac7d94935e9eb65c0480b93152f3a6de2a7c1693 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at 
{ +namespace native { +struct TORCH_API structured_special_scaled_modified_bessel_k1_out : public at::meta::structured_special_scaled_modified_bessel_k1 { +void impl(const at::Tensor & x, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..112df862ff51facbb20e6925329f59bbc5687d17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_xlog1py(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & special_xlog1py_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_for_size_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_for_size_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0a1a81e868dd8071471a8a43dbe05247d01443b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_for_size_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void sym_constrain_range_for_size(const at::Scalar & size, c10::optional min=c10::nullopt, c10::optional max=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..023a1dec6beb2dd53e6e66af495a7247f7fe89ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API tensordot { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::tensordot") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other); +}; + +struct TORCH_API tensordot_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::tensordot") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to.h new file mode 100644 index 0000000000000000000000000000000000000000..aeba267388b7d84ab67d35e5ba72d4ad5c1c2d28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..60ae7d2ec94d682a55c5b9a37270592fc184567c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::to_dense_backward(Tensor grad, Tensor input, bool? 
masked_grad=None) -> Tensor +inline at::Tensor to_dense_backward(const at::Tensor & grad, const at::Tensor & input, c10::optional masked_grad=c10::nullopt) { + return at::_ops::to_dense_backward::call(grad, input, masked_grad); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc.h new file mode 100644 index 0000000000000000000000000000000000000000..9a9c7879687c05d2efad0b44c334068efe7d79f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..30fe07181bba8cd6093401257cb5664f86e9ca6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor transpose(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..83899990b92e9a8e14afc117c27961e0beae7a09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..61ca10e2d4844b6c434dd35ae717b00f6bf0ba76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor upsample_bilinear2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bilinear2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input); +TORCH_API at::Tensor & upsample_bilinear2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bilinear2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..17e8ffc83f025ca70f4ab9084b8f4aca5150e070 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API upsample_trilinear3d_vec { + using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, bool, c10::optional>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor") + static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); +}; + +struct TORCH_API upsample_trilinear3d_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, c10::optional, c10::optional, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out); +}; + +struct TORCH_API upsample_trilinear3d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w); +}; + +}} // namespace at::_ops
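Editor's note (appended after the changeset, not part of any file above): the headers added here are all torchgen-generated and follow one pattern. Each operator gets an at::_ops struct with static call and redispatch entry points (the *_ops.h files such as tensordot_ops.h and upsample_trilinear3d_ops.h), per-backend declarations in namespaces like at::cpu, at::cuda, and at::meta (the *_dispatch.h files), and inline public wrappers (the Function.h-derived files such as sgn.h and soft_margin_loss.h) that simply forward to the _ops entry point. The sketch below illustrates how these declarations are consumed from user C++; it assumes a working libtorch installation where <torch/torch.h> is on the include path, and uses tensordot purely as an example operator, since its _ops header appears in this changeset.

// Hedged usage sketch (assumes libtorch is installed and linked; not part of the diff above).
#include <torch/torch.h>

int main() {
  at::Tensor a = torch::rand({3, 4});
  at::Tensor b = torch::rand({4, 5});

  // Public ATen wrapper for aten::tensordot, declared in the generated
  // ATen/ops/tensordot.h (a sibling of the tensordot_ops.h added above).
  at::Tensor c = at::tensordot(a, b, /*dims_self=*/{1}, /*dims_other=*/{0});

  // Equivalent call through the generated operator struct declared in
  // tensordot_ops.h; the public wrapper forwards to this entry point.
  at::Tensor d = at::_ops::tensordot::call(a, b, {1}, {0});

  TORCH_CHECK(at::allclose(c, d), "wrapper and _ops entry point disagree");
  return 0;
}

The same relationship holds for every operator in this changeset: the *_ops.h struct carries the schema string plus the call/redispatch pair, while the dispatch headers (for example reflection_pad1d_cuda_dispatch.h or random_cpu_dispatch.h) declare the backend-specific functions the dispatcher ultimately routes to.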