diff --git a/ckpts/universal/global_step20/zero/21.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/21.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..62346680b1634dc04d2cef2ed25747c401c8a6a1
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/21.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2b7c3e345d9b1c0d5946b975e182294c778ddad7f72a2a59de6dae54ff94c0d
+size 50332828
diff --git a/ckpts/universal/global_step20/zero/21.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/21.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ba4b921f7a6cab906a9c1e535697c5e2ba708847
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/21.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc1c34c3c4d4c0dec676f2f309655c3a734dd54929cb5c54ecc49d6c5642e96c
+size 50332749
diff --git a/ckpts/universal/global_step20/zero/23.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/23.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..06bf4ae5b30c9c4ed98d17f01f64e7313519e5c1
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/23.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9256ac66b8c0e0c358c6817f5284b2fc4594018333af0abf363815aa52928fc
+size 9372
diff --git a/ckpts/universal/global_step20/zero/23.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/23.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..175952093572b3b05d2bdc7a600227567be89040
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/23.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fb039a7ef5d5457baa033214aa3eb5036cb3548ab970f351df642e4c8c6b5cf
+size 9387
diff --git a/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c05e362a520a9cce1580f7c7a20906a94e3ef600
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/7.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a63748fcbde990d0476abdd89d09ea0db625fe111cc43b7b8694fed912cf19bc
+size 50332749
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..c61d3f39cf6e294a0139652bca0cc10d8aecfa62
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_ctc_loss_backward_ops.h>
+
+namespace at {
+
+
+// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
+inline at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) {
+    return at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
+}
+
+// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
+inline at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) {
+    return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
+}
+
+// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _ctc_loss_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) {
+    return at::_ops::_ctc_loss_backward_out::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
+}
+// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _ctc_loss_backward_outf(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
+    return at::_ops::_ctc_loss_backward_out::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
+}
+
+}
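// Usage sketch (editorial illustration, not part of the vendored header): how the
// _ctc_loss_backward wrapper above pairs with at::_ctc_loss, which returns the
// (neg_log_likelihood, log_alpha) pair the backward expects. Shapes and values are
// made up for the example; layout is (T, N, C) = (50, 4, 20).
#include <ATen/ATen.h>

at::Tensor ctc_backward_example() {
  auto log_probs = at::randn({50, 4, 20}).log_softmax(2);
  auto targets   = at::randint(1, 20, {4, 10}, at::kLong);
  std::vector<int64_t> input_lengths(4, 50), target_lengths(4, 10);
  auto fwd = at::_ctc_loss(log_probs, targets, input_lengths, target_lengths);
  return at::_ctc_loss_backward(at::ones_like(std::get<0>(fwd)), log_probs, targets,
                                input_lengths, target_lengths, std::get<0>(fwd),
                                std::get<1>(fwd), /*blank=*/0, /*zero_infinity=*/false);
}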
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..58794365c011d62adbf3a118ce2fafa9d4d7fd35
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7d38b326b539a9dd0613ab166a40a32a826a0b3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_efficientzerotensor_ops.h>
+
+namespace at {
+
+
+// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_efficientzerotensor::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_efficientzerotensor::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::_efficientzerotensor::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::_efficientzerotensor::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_efficientzerotensor::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _efficientzerotensor(c10::SymIntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_efficientzerotensor::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::_efficientzerotensor::call(size, dtype, layout, device, pin_memory);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _efficientzerotensor(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::_efficientzerotensor::call(size, dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _efficientzerotensor_out(at::Tensor & out, at::IntArrayRef size) {
+    return at::_ops::_efficientzerotensor_out::call(c10::fromIntArrayRefSlow(size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _efficientzerotensor_out(at::Tensor & out, at::IntArrayRef size) {
+    return at::_ops::_efficientzerotensor_out::call(c10::fromIntArrayRefSlow(size), out);
+  }
+}
+
+// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _efficientzerotensor_outf(at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::_efficientzerotensor_out::call(c10::fromIntArrayRefSlow(size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _efficientzerotensor_outf(at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::_efficientzerotensor_out::call(c10::fromIntArrayRefSlow(size), out);
+  }
+}
+
+// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _efficientzerotensor_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
+    return at::_ops::_efficientzerotensor_out::call(size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _efficientzerotensor_out(at::Tensor & out, c10::SymIntArrayRef size) {
+    return at::_ops::_efficientzerotensor_out::call(size, out);
+  }
+}
+
+// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _efficientzerotensor_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
+    return at::_ops::_efficientzerotensor_out::call(size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _efficientzerotensor_outf(c10::SymIntArrayRef size, at::Tensor & out) {
+    return at::_ops::_efficientzerotensor_out::call(size, out);
+  }
+}
+
+}
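// Usage sketch (editorial illustration): _efficientzerotensor above creates a tensor
// routed through the ZeroTensor dispatch key, so the zeros are represented lazily
// rather than materialized; the dtype/device plumbing mirrors at::zeros.
#include <ATen/ATen.h>

at::Tensor zero_example() {
  return at::_efficientzerotensor({2, 3}, at::TensorOptions().dtype(at::kFloat));
}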
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c3aad1858cf60a521a1cf0f9a127b897b80d75f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_ops.h>
+
+namespace at {
+
+
+// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
+    return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
+}
+
+}
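// Usage sketch (editorial illustration): wiring the learnable fake-quantization
// backward declared above. Tensor shapes and the [-128, 127] quant range are made
// up for the example; the learnable variant takes float scale/zero_point tensors.
#include <ATen/ATen.h>

std::tuple<at::Tensor, at::Tensor, at::Tensor> fq_backward_example() {
  auto x          = at::randn({4, 4});
  auto scale      = at::tensor(0.1f);
  auto zero_point = at::tensor(0.0f);
  // Returns (d_input, d_scale, d_zero_point).
  return at::_fake_quantize_learnable_per_tensor_affine_backward(
      at::ones_like(x), x, scale, zero_point,
      /*quant_min=*/-128, /*quant_max=*/127, /*grad_factor=*/1.0);
}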
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf0fa8e75559db3545c0abe99c8cba08ff886b64
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _fake_quantize_learnable_per_tensor_affine_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fake_quantize_learnable_per_tensor_affine_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h
new file mode 100644
index 0000000000000000000000000000000000000000..5dd8422289e8197308409de4d24ec92918be38cb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h>
+
+namespace at {
+
+
+// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
+inline ::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
+    return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
+}
+
+// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
+    return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
+}
+// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..065bbe0fdbfab8ee4f648cc0826dc2f32eaaee16
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _fill_mem_eff_dropout_mask_ {
+  using schema = at::Tensor & (at::Tensor &, double, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fill_mem_eff_dropout_mask_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double dropout_p, int64_t seed, int64_t offset);
+};
+
+}} // namespace at::_ops
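// Usage sketch (editorial illustration): the in-place schema above fills `self`
// with a dropout mask derived from the given Philox seed/offset. It is a debugging
// aid for the memory-efficient attention kernels and, as far as we know, only has
// a CUDA implementation, so this sketch assumes a CUDA build.
#include <ATen/ATen.h>

at::Tensor dropout_mask_example() {
  auto mask = at::empty({16, 16}, at::device(at::kCUDA).dtype(at::kFloat));
  return at::_fill_mem_eff_dropout_mask_(mask, /*dropout_p=*/0.5,
                                         /*seed=*/42, /*offset=*/0);
}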
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a31ad02708c1f35021a7c20c159447633a79ea0c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_maximum_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_maximum_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_maximum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..772b04f3a7c758c27a441be322388b6a633ab93f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_sign(at::TensorList self);
+TORCH_API void _foreach_sign_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
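// Usage sketch (editorial illustration): the fused foreach kernels declared above
// operate on a whole tensor list in one call instead of looping per tensor.
// List contents are made up for the example.
#include <ATen/ATen.h>

void foreach_example() {
  std::vector<at::Tensor> xs = {at::randn({3}), at::randn({5})};
  auto clamped = at::_foreach_maximum(xs, /*scalar=*/0.0);  // max(x, 0) per tensor
  at::_foreach_sign_(xs);                                   // in-place sign per tensor
}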
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7d2c5a7b75e7134630dd719ef68a7b22ad1e4ec3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _functional_sym_constrain_range_for_size(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max, const at::Tensor & dep_token);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3173d9b0f42e64047edb63efcbf11c2dd3c993aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
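// Usage sketch (editorial illustration): _linalg_eigh is the internal operator
// behind torch.linalg.eigh; "L" means only the lower triangle of the Hermitian
// input is read. The symmetrization here is just to build a valid input.
#include <ATen/ATen.h>

std::tuple<at::Tensor, at::Tensor> eigh_example() {
  auto A = at::randn({4, 4});
  A = (A + A.transpose(0, 1)) * 0.5;  // make symmetric
  return at::_linalg_eigh(A, /*UPLO=*/"L", /*compute_v=*/true);  // (eigenvalues, eigenvectors)
}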
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ced73ac79993a2dab989280be77d865b74663bfd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_training(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4beaa581c0d6d209b8e9c253c3c53fb0a778d9ec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _native_multi_head_attention {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, bool, bool, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_multi_head_attention")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type);
+};
+
+struct TORCH_API _native_multi_head_attention_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, bool, bool, c10::optional<int64_t>, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_multi_head_attention")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_jagged_dummy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_jagged_dummy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5127ff1f0312f675412976861009a598591df696
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_jagged_dummy_native.h
@@ -0,0 +1,20 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c7ac7f2e03f8085b485366152e7b3672316dda7d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _nested_tensor_strides_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor _nested_tensor_strides(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_from_tensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_from_tensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d21aa9f503f093b4df876410540bc04748ca51a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_from_tensor.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_reshape_from_tensor_ops.h>
+
+namespace at {
+
+
+// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
+inline at::Tensor _reshape_from_tensor(const at::Tensor & self, const at::Tensor & shape) {
+    return at::_ops::_reshape_from_tensor::call(self, shape);
+}
+
+}
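// Usage sketch (editorial illustration): _reshape_from_tensor above reads the
// target shape out of a 1-D integer tensor instead of an IntArrayRef; values
// here are made up for the example.
#include <ATen/ATen.h>

at::Tensor reshape_example() {
  auto x     = at::arange(6);
  auto shape = at::tensor({2, 3}, at::kLong);  // shape carried as tensor data
  return at::_reshape_from_tensor(x, shape);   // same result as x.reshape({2, 3})
}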
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..190051feb674f344c16bcfd81df596f0bd3946ed
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _segment_reduce_backward(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, const c10::optional<at::Scalar> & initial=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..adb0ea402a3dd35105063a7563ee10899ef8596e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={});
+TORCH_API at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={});
+TORCH_API at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..70bf2d16432e36ffee4ec288f10648b7eb2c918d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _sparse_sum_backward_out(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out);
+TORCH_API at::Tensor _sparse_sum_backward_cpu(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim);
+TORCH_API at::Tensor _sparse_sum_backward_cuda(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6104a5ca54fc296939305b4cdc9349b94b48005
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _test_autograd_multiple_dispatch_fullcoverage(const at::Tensor & self);
+TORCH_API at::Tensor & _test_autograd_multiple_dispatch_fullcoverage_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor _test_autograd_multiple_dispatch_ntonly(const at::Tensor & self, bool b);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured.h
new file mode 100644
index 0000000000000000000000000000000000000000..23608aad68dee43866afd07808991a12d0f5e2f6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_to_sparse_semi_structured_ops.h>
+
+namespace at {
+
+
+// aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured(const at::Tensor & dense) {
+    return at::_ops::_to_sparse_semi_structured::call(dense);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/alpha_dropout_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/alpha_dropout_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1262aeafbb6f74f793019962476fbc0a231b85b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/alpha_dropout_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor alpha_dropout(const at::Tensor & input, double p, bool train);
+TORCH_API at::Tensor & alpha_dropout_(at::Tensor & self, double p, bool train);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh.h
new file mode 100644
index 0000000000000000000000000000000000000000..3335e73c2e0cd6ccabe35e595440e4940b9be439
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/arccosh_ops.h>
+
+namespace at {
+
+
+// aten::arccosh(Tensor self) -> Tensor
+inline at::Tensor arccosh(const at::Tensor & self) {
+    return at::_ops::arccosh::call(self);
+}
+
+// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & arccosh_(at::Tensor & self) {
+    return at::_ops::arccosh_::call(self);
+}
+
+// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arccosh_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::arccosh_out::call(self, out);
+}
+// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arccosh_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::arccosh_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..55e450a3e083a977325a7d266aa65cb168b41c08
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/argmax_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_argmax_out : public at::meta::structured_argmax {
+void impl(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..071ad94112b0eadd228b43dbb264d4dab892b797
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API as_strided {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<c10::SymInt>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
+};
+
+struct TORCH_API as_strided_ {
+  using schema = const at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<c10::SymInt>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)")
+  static const at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
+  static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2429ecf6be19d32a6f7dd85bbfc725389e501b74
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor atan(const at::Tensor & self);
+TORCH_API at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & atan_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
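// Usage sketch (editorial illustration): the out-variant above writes into a
// preallocated tensor instead of allocating a new one; at::atan_out dispatches to
// the CUDA kernel declared here when its arguments live on a CUDA device.
#include <ATen/ATen.h>

at::Tensor atan_out_example(const at::Tensor& x) {
  auto out = at::empty_like(x);
  at::atan_out(out, x);  // out = atan(x), no fresh allocation
  return out;
}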
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..03cee9a331c53827fd5c6ba35a6a535477329aba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..379d1b7a5afe03d1bff0118f71418db7c0d00e89
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor chain_matmul(at::TensorList matrices);
+TORCH_API at::Tensor & chain_matmul_out(at::Tensor & out, at::TensorList matrices);
+TORCH_API at::Tensor & chain_matmul_outf(at::TensorList matrices, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/column_stack.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/column_stack.h
new file mode 100644
index 0000000000000000000000000000000000000000..96e1a7a5d5777a79a83fbdb6332dca912ca66537
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/column_stack.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/column_stack_ops.h>
+
+namespace at {
+
+
+// aten::column_stack(Tensor[] tensors) -> Tensor
+inline at::Tensor column_stack(at::TensorList tensors) {
+    return at::_ops::column_stack::call(tensors);
+}
+
+// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & column_stack_out(at::Tensor & out, at::TensorList tensors) {
+    return at::_ops::column_stack_out::call(tensors, out);
+}
+// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & column_stack_outf(at::TensorList tensors, at::Tensor & out) {
+    return at::_ops::column_stack_out::call(tensors, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1999db6a93a96890eeb3e47abc9e9643eab2ecca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API count_nonzero_dim_IntList {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::count_nonzero")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_IntList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim);
+};
+
+struct TORCH_API count_nonzero {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::count_nonzero")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "count_nonzero(Tensor self, int? dim=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<int64_t> dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim);
+};
+
+struct TORCH_API count_nonzero_dim_IntList_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::count_nonzero")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_IntList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out);
+};
+
+struct TORCH_API count_nonzero_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::count_nonzero")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::optional<int64_t> dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f772c6d9317c2d84c8c6ca8a123d8be65c46d6ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cummax {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, int64_t dim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim);
+};
+
+struct TORCH_API cummax_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices);
+};
+
+struct TORCH_API cummax_dimname {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::Dimname);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::Dimname dim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim);
+};
+
+struct TORCH_API cummax_dimname_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Dimname, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cummax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a41dacca282df6d0f68c1e5aaf0b0afd1fbabc3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/elu_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_elu_backward_out : public at::meta::structured_elu_backward {
+void impl(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_bag_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_bag_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c539f8f915d53d1e0e6c7336debc9c5a9c442e7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_bag_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator
signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional & per_sample_weights={}, bool include_last_offset=false); +TORCH_API ::std::tuple embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, c10::optional padding_idx); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_native.h new file mode 100644 index 0000000000000000000000000000000000000000..48202a3295a2eb9d09b5d1b6b68106b4c92e3b5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor expand(const at::Tensor & self, at::IntArrayRef size, bool implicit=false); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..547d98cee7414dc44598665c4fbe2aad37f2399e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fake_quantize_per_channel_affine_cachemask_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fake_quantize_per_channel_affine_cachemask_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor") + static at::Tensor call(const at::Tensor & grad, const at::Tensor & mask); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0bae4be03f2045d0d49c37c675cc669a1f7276f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_rfft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_rfft_symint_out(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a090ebcd807b51431038fbc01ec2ec46986aaafa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
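// NOTE (editorial annotation, not part of the generated file): headers named
// *_<dispatchkey>_dispatch.h, such as this hypot_cuda_dispatch.h, declare
// per-backend entry points (here in namespace at::cuda) that call the kernel
// registered for that dispatch key directly, skipping the usual dispatcher
// round trip. The same pattern repeats for the cpu and composite* dispatch
// headers throughout this diff.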
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h new file mode 100644 index 0000000000000000000000000000000000000000..40d0e53bbb3b1ca2e4fb4432ef0a7329802d4e1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_vulkan_available(); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan.h new file mode 100644 index 0000000000000000000000000000000000000000..4222f80d1a3525edc2ee5cf55fb287c0d63f3ab0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isnan(Tensor self) -> Tensor +inline at::Tensor isnan(const at::Tensor & self) { + return at::_ops::isnan::call(self); +} + +// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isnan_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isnan_out::call(self, out); +} +// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isnan_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isnan_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a4e7e3a6ae918f437a65a7efa32ebca8ad052dd9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isreal(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..551e1851e842e972401ac83b1e57bfc1fe0be40e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} + +// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor +inline at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_native.h new file mode 100644 index 0000000000000000000000000000000000000000..201caa3b5897199558b88aac0a88911a36dde66b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple linalg_eig(const at::Tensor & self); +TORCH_API ::std::tuple linalg_eig_out(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a24b989f78a64354cba3ba110fe7226ddd722539 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { 
+namespace native { +struct TORCH_API structured_linalg_lu_out : public at::meta::structured_linalg_lu { +void impl(const at::Tensor & A, bool pivot, const at::Tensor & P, const at::Tensor & L, const at::Tensor & U); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul.h new file mode 100644 index 0000000000000000000000000000000000000000..56abdbe006e347e85dcef7a28420eb67eb887200 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor +inline at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::linalg_matmul::call(self, other); +} + +// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::linalg_matmul_out::call(self, other, out); +} +// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::linalg_matmul_out::call(self, other, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f8ec729a51449a1c18d5ba105937f5eef48d30f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ce9ae3041d664f991cc4f62a4a6697c5595f5d3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mkldnn_linear_backward_weights { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_linear_backward_weights") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined); +}; + +struct TORCH_API mkldnn_linear_backward_weights_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_linear_backward_weights") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mul_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mul_native.h new file mode 100644 index 0000000000000000000000000000000000000000..025d3882c981acdcb6cc1588c8b9e513e085d79b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mul_native.h @@ -0,0 +1,43 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_mul_out : public at::meta::structured_mul_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_mul_Tensor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & NestedTensor_mul__Tensor(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor mul_sparse(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & mul_out_sparse_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & mul_sparse_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & mul_out_sparse_cuda(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor mul_sparse_csr(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & 
mul_out_sparse_csr(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & mul_sparse_csr_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor mkldnn_mul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & mkldnn_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & mkldnn_mul_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor mul_zerotensor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor mul(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & mul_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & mul_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor NestedTensor_mul_Scalar(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & NestedTensor_mul__Scalar(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor mul_scalar_sparse_csr(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & mul__scalar_sparse_csr(at::Tensor & self, const at::Scalar & other); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b1a05725ec879f23ed1e3066666202f93b163901 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & multi_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & multi_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional & weight, int64_t reduction, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/normal_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/normal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9047c6838522f22bff4f5fa1dc45247132554a05 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/normal_ops.h @@ -0,0 +1,138 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
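// NOTE (editorial annotation, not part of the generated file): every struct
// in these *_ops.h headers follows one torchgen pattern. `name` plus
// `overload_name` identify the operator in the dispatcher registry,
// `schema_str` is its full JIT schema, `call` invokes the operator through
// the standard dispatch path, and `redispatch` re-enters dispatch with an
// explicit c10::DispatchKeySet. The normal_* structs below are that pattern
// instantiated once per overload of aten::normal (plus its in-place and
// functional variants).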
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API normal_ { + using schema = at::Tensor & (at::Tensor &, double, double, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, double mean, double std, c10::optional generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, c10::optional generator); +}; + +struct TORCH_API normal_functional { + using schema = at::Tensor (const at::Tensor &, double, double, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal_functional") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, double mean, double std, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional generator); +}; + +struct TORCH_API normal_Tensor_float_out { + using schema = at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_float_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & mean, double std, c10::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, c10::optional generator, at::Tensor & out); +}; + +struct TORCH_API normal_Tensor_float { + using schema = at::Tensor (const at::Tensor &, double, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_float") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.Tensor_float(Tensor mean, float std=1, *, Generator? 
generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & mean, double std, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, c10::optional generator); +}; + +struct TORCH_API normal_float_Tensor_out { + using schema = at::Tensor & (double, const at::Tensor &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "float_Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(double mean, const at::Tensor & std, c10::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional generator, at::Tensor & out); +}; + +struct TORCH_API normal_float_Tensor { + using schema = at::Tensor (double, const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "float_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor") + static at::Tensor call(double mean, const at::Tensor & std, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional generator); +}; + +struct TORCH_API normal_Tensor_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & mean, const at::Tensor & std, c10::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional generator, at::Tensor & out); +}; + +struct TORCH_API normal_Tensor_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? 
generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & mean, const at::Tensor & std, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional generator); +}; + +struct TORCH_API normal_float_float { + using schema = at::Tensor (double, double, c10::SymIntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "float_float") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(double mean, double std, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API normal_float_float_out { + using schema = at::Tensor & (double, double, c10::SymIntArrayRef, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "float_float_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(double mean, double std, c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out); +}; + +struct TORCH_API normal_out { + using schema = at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, double mean, double std, c10::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional generator, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/outer_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/outer_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b1ca274d9d70c879ebc8a9109f67b41edd21e90a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/outer_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API outer { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::outer") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "outer(Tensor self, Tensor vec2) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & vec2); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2); +}; + +struct TORCH_API outer_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::outer") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "outer.out(Tensor self, Tensor vec2, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2643c297a11c7c19013e7cf61eef0cbbd02a89c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step=1, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +TORCH_API at::Tensor & range_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out); +TORCH_API at::Tensor & range_cuda_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out); +TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +TORCH_API at::Tensor & range_out_no_step(const at::Scalar & start, const at::Scalar & end, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2b40e647bb0e21a2d56a120d10b8427dfc2100a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/retains_grad.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/retains_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..c0b70546bf24b4769d378448cd884d5aa384e7af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/retains_grad.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsub.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsub.h new file mode 100644 index 0000000000000000000000000000000000000000..57292c4463b701830d28b0274805235e4cb5e4df --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rsub.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +inline at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Tensor::call(self, other, alpha); +} + +// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +inline at::Tensor rsub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Scalar::call(self, other, alpha); +} + +// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rsub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Tensor_out::call(self, other, alpha, out); +} +// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rsub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::rsub_Tensor_out::call(self, other, alpha, out); +} + +// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rsub_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Scalar_out::call(self, other, alpha, out); +} +// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & rsub_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::rsub_Scalar_out::call(self, other, alpha, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..38336240175b170bcd3566c0644913c8b1f20c2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid.h new file mode 100644 index 0000000000000000000000000000000000000000..c80f43a0fb688e9ba02f13c6a0bbb1edb430488b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sigmoid(Tensor self) -> Tensor +inline at::Tensor sigmoid(const at::Tensor & self) { + return at::_ops::sigmoid::call(self); +} + +// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & sigmoid_(at::Tensor & self) { + return at::_ops::sigmoid_::call(self); +} + +// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sigmoid_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::sigmoid_out::call(self, out); +} +// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & sigmoid_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::sigmoid_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..2048840db34c15a10054107ce94296f5c94023dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_sigmoid : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose3d_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose3d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..268026e3646dac3b2269b67e02cb1a0ccd9d4652 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose3d_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
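// NOTE (editorial annotation, not part of the generated file): the paired
// wrappers in these headers illustrate a convention used throughout: `_out`
// variants take the output tensor as the first parameter (the C++ API
// ordering), while `_outf` variants keep the schema's argument order with
// `out` last. Both forward to the same underlying out-variant operator.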
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor slow_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor slow_conv_transpose3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_transpose3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor & slow_conv_transpose3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out); +TORCH_API at::Tensor & slow_conv_transpose3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_transpose3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3ac1521d3cff2182e4677f1f754097020dab9006 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h.h new file mode 100644 index 0000000000000000000000000000000000000000..579839d64868c34f08847a92d9a3ce6209dd4f2a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor +inline at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_h::call(x, n); +} + +// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor +inline at::Tensor special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n); +} + +// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor +inline at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n); +} + +// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_h_out::call(x, n, out); +} +// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_h_out::call(x, n, out); +} + +// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_hermite_polynomial_h_x_scalar_out::call(x, n, out); +} +// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_hermite_polynomial_h_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_h_x_scalar_out::call(x, n, out); +} + +// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_hermite_polynomial_h_n_scalar_out::call(x, n, out); +} +// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_hermite_polynomial_h_n_scalar_out::call(x, n, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9b1592b95c0d94f71545326de31c8508cba91a5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_hermite_polynomial_h { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_h") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor") + static at::Tensor call(const at::Tensor & x, const at::Tensor & n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n); +}; + +struct TORCH_API special_hermite_polynomial_h_x_scalar { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_h") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor") + static at::Tensor call(const at::Scalar & x, const at::Tensor & n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n); +}; + +struct TORCH_API special_hermite_polynomial_h_n_scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_h") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor") + static at::Tensor call(const at::Tensor & x, const at::Scalar & n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n); +}; + +struct TORCH_API special_hermite_polynomial_h_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_h") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, 
"special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out); +}; + +struct TORCH_API special_hermite_polynomial_h_x_scalar_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_h") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +}; + +struct TORCH_API special_hermite_polynomial_h_n_scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_h") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2fd0fdd80b73d55053bcf719d448a5671ce36b6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2fd0fdd80b73d55053bcf719d448a5671ce36b6d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_i1(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..36049fb8c569d819488536ece367000a01480527
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_i1e(const at::Tensor & self);
+TORCH_API at::Tensor & special_i1e_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_i1e_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..06b8929f0cc17a1c8f3dc89b48bc3e4dffb48d29
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/special_i1e_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_i1e_out : public at::meta::structured_special_i1e {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
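special_i1e is a structured operator: a meta function computes the output shape and dtype once, and the per-device impl declared in the native header above only fills in out; the cpu dispatch header is the CPU-only view of the same op. From the caller's side that machinery is invisible. A minimal sketch with illustrative inputs:

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::linspace(0.0, 5.0, 6);

  // Functional form allocates its result.
  at::Tensor y = at::special_i1e(self);

  // The out form reuses a preallocated buffer, which the structured
  // kernel checks (and resizes if necessary) before writing.
  at::Tensor out = at::empty_like(self);
  at::special_i1e_out(out, self);
  return 0;
}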
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e51c50a1a056fb7b8602952fd7720b11f5feced
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_log_ndtr(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e63e5eb122f6e93169b0ce13982e69e777559629
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_scaled_modified_bessel_k0(const at::Tensor & x);
+TORCH_API at::Tensor & special_scaled_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & x);
+TORCH_API at::Tensor & special_scaled_modified_bessel_k0_outf(const at::Tensor & x, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..38796f7b69eaafa55709ac69ff24694d1222b4fd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c6be54e51fbb64991640bba609f3b8333425093
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u.h
@@ -0,0 +1,67 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
+
+namespace at {
+
+
+// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
+inline at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n);
+}
+
+// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
+inline at::Tensor special_shifted_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n);
+}
+
+// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
+inline at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n);
+}
+
+// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u_out::call(x, n, out);
+}
+// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u_out::call(x, n, out);
+}
+
+// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::call(x, n, out);
+}
+// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::call(x, n, out);
+}
+
+// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::call(x, n, out);
+}
+// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+    return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::call(x, n, out);
+}
+
+}
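The Operator.h headers that follow give each overload a struct with two static entry points: call(), a full dispatcher round trip, and redispatch(), which takes an explicit DispatchKeySet and is used internally by kernels and fallbacks. The inline wrappers shown above are one-line shims over call(), so calling it directly is equivalent. A sketch of that equivalence, with made-up inputs (user code would normally stick to the at:: wrapper):

#include <ATen/ATen.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>

int main() {
  at::Tensor x = at::rand({3});
  at::Tensor n = at::full({3}, 4.0);

  // Same dispatcher path the public wrapper takes.
  at::Tensor y = at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n);
  return 0;
}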
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e234e08352752b83f10f552f4b98bb4af1a5f7c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_shifted_chebyshev_polynomial_w {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_shifted_chebyshev_polynomial_w_x_scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Scalar & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_shifted_chebyshev_polynomial_w_n_scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Scalar & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n);
+};
+
+struct TORCH_API special_shifted_chebyshev_polynomial_w_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_shifted_chebyshev_polynomial_w_x_scalar_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_shifted_chebyshev_polynomial_w_n_scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b9e1fe036a0b8c9e09aa1050bf4c192cb9c7742
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor tril(const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+TORCH_API at::Tensor & tril_(at::Tensor & self, int64_t diagonal=0);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..34750e95124062b9c10ab58569b8d962d79354c3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> unsafe_split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0);
+TORCH_API void unsafe_split_with_sizes_out_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out);
+} // namespace native
+} // namespace at
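tril's CPU dispatch header declares the usual quartet: the functional form, the two out spellings, and the in-place tril_. A minimal sketch of how they relate, with an illustrative matrix:

#include <ATen/ATen.h>

int main() {
  at::Tensor m = at::rand({4, 4});

  at::Tensor lower  = at::tril(m);      // copy, keep main diagonal and below
  at::Tensor strict = at::tril(m, -1);  // copy, keep strictly below the diagonal
  m.tril_();                            // in-place variant on the tensor itself
  return 0;
}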
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..15de400f78b921372783b0036cf5ba0b30f697cd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..75c32e0d9adc5df2b40a2ebe1892ce759c325847
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_linear1d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_linear1d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input);
+};
+
+struct TORCH_API upsample_linear1d_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_linear1d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/values_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/values_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..053c463044e25f5405d6639686127f43531cef4e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/values_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API values {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::values")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "values(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zero_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zero_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..533113c43f0fdae65703d1221af15b4e30aa8f57
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zero_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & zero_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
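The SymInt[] arguments in schemas like upsample_linear1d_backward exist so sizes can stay symbolic under tracing and export; in eager C++ they are built from ordinary integers. A minimal sketch with made-up sizes, calling the public wrapper rather than the _ops struct:

#include <ATen/ATen.h>

int main() {
  // Gradient flowing back through a 1-d linear upsample from length 8 to 16.
  at::Tensor grad_output = at::rand({1, 3, 16});
  at::Tensor grad_input = at::upsample_linear1d_backward(
      grad_output,
      /*output_size=*/{16},
      /*input_size=*/{1, 3, 8},
      /*align_corners=*/false,
      /*scales=*/c10::nullopt);
  return 0;
}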