diff --git a/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..74f367fb5c75c4b81be2f5674738d38c8bfcc1b3
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01505932b05c7c0bedcaa2c8088dbf012a6583f800d7fdc1d276a9ea1207972f
+size 9372
diff --git a/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1bb0ced8ef19457e3dd0e9733c3909d31ba03774
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adaffa33ea74f3af92a83a8f66701112575132cf81cde6c00667338d0c3d2f5a
+size 9387
diff --git a/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4ffb0dcf64509651f9dd2113c0c7903a640a8955
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/12.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eef869eab016faaaf9a35b135e86d9e7f4c9ded8c5f51cc9c4f4755a7fa91f6d
+size 9293
diff --git a/ckpts/universal/global_step120/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a11899daf3835f714213485bdfa493bd81dd68f3
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e39e7127d885eaf4ff39b7833827e0a4c65f849fbc5c7cd507d68aa8d83af97f
+size 33555627
diff --git a/ckpts/universal/global_step120/zero/16.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step120/zero/16.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9172478aec3a22b2233d5ede9a9c6a7fdf64dc50
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/16.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71f55ac04cb6edf5cdd24b486b8261f7419f3d01c1494e41225c16d712ae743f
+size 33555533
diff --git a/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3dbf7bbcdd4ce373ae731082e51c11857a752c9e
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b0198731a525787ebaf141ea8b7bcda02bd0273bb50ba2a781670af33dc6c15
+size 33555612
diff --git a/ckpts/universal/global_step120/zero/6.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/6.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8444f0a639894df42752757a6f9d921e61e427a6
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/6.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:321f0b018f0abbea77cda353033e066b3cfec5daea715d5b5f8a197223fc8d15
+size 33555627
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..e11841abe576950625da71b3bb2cf7eecc548e6d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_adaptive_avg_pool3d_backward_ops.h>
+
+namespace at {
+
+
+// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
+inline at::Tensor _adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
+    return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self);
+}
+
+// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _adaptive_avg_pool3d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
+    return at::_ops::_adaptive_avg_pool3d_backward_out::call(grad_output, self, out);
+}
+// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_adaptive_avg_pool3d_backward_out::call(grad_output, self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d6313ce2d653d8b9b4f0841cf22f5459862082fe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_double_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_double_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c0ad3879b658222bdbbe956c86301c46621b8a0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_double_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..10e5a1606fbdfa05181cf395000b421b065a85e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _cudnn_rnn_flatten_weight_out_symint(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out);
+TORCH_API at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf09e85622b90455b99fa4d3b374719ddb923a24
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const at::Tensor & out, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, int64_t max_seqlen_q, int64_t max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional<double> scale=c10::nullopt, c10::optional<int64_t> num_splits_key=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b599e69578952940cc167aaeb4b109a1de870520
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foobar_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foobar {
+  using schema = at::Tensor (const at::Tensor &, bool, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foobar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool arg1, bool arg2, bool arg3);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3);
+};
+
+struct TORCH_API _foobar_out {
+  using schema = at::Tensor & (const at::Tensor &, bool, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foobar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbf98f5b794bf027208cf86842c163c5d9c7f2e1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_native.h
@@ -0,0 +1,37 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_pow_List_out(at::TensorList self, at::TensorList exponent, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_list_kernel_slow(at::TensorList self, at::TensorList exponent);
+TORCH_API void foreach_tensor_pow_list_kernel_slow_(at::TensorList self, at::TensorList exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_list_kernel_cuda(at::TensorList self, at::TensorList exponent);
+TORCH_API void foreach_tensor_pow_list_kernel_cuda_(at::TensorList self, at::TensorList exponent);
+TORCH_API void _foreach_pow_Scalar_out(at::TensorList self, const at::Scalar & exponent, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_scalar_kernel_slow(at::TensorList self, const at::Scalar & exponent);
+TORCH_API void foreach_tensor_pow_scalar_kernel_slow_(at::TensorList self, const at::Scalar & exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_scalar_kernel_cuda(at::TensorList self, const at::Scalar & exponent);
+TORCH_API void foreach_tensor_pow_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & exponent);
+TORCH_API void _foreach_pow_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API void foreach_tensor_pow_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API void foreach_tensor_pow_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_scalar_pow_list_kernel_slow(const at::Scalar & self, at::TensorList exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_scalar_pow_list_kernel_cuda(const at::Scalar & self, at::TensorList exponent);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_copy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed385cdfbb788fa69ddbbb2388ff52a521470cfe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _indices_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor _indices_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d925266bd119c579ae011f9b554cb0961f7fe19d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _local_scalar_dense {
+  using schema = at::Scalar (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_local_scalar_dense")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_local_scalar_dense(Tensor self) -> Scalar")
+  static at::Scalar call(const at::Tensor & self);
+  static at::Scalar redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..50ce5d88f1dca61e34b3efdb487695af2ef4edb3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor & _log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor & _log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6bb151e52ca9d3d64341664202c25a79dd22a67e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _nnz_sparse(const at::Tensor & self);
+TORCH_API int64_t _nnz_sparse_csr(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_forward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_forward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..94b34e69485e98dde4e68afc111d28b9fcbcae19
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_forward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _pdist_forward {
+  using schema = at::Tensor (const at::Tensor &, double);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pdist_forward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pdist_forward(Tensor self, float p=2) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, double p);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p);
+};
+
+struct TORCH_API _pdist_forward_out {
+  using schema = at::Tensor & (const at::Tensor &, double, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pdist_forward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, double p, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_print_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_print_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..578af87de4f2bb78cc35047c1c4e25c34a84cea7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_print_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _print(c10::string_view s);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output.h
new file mode 100644
index 0000000000000000000000000000000000000000..f116610a62495f8061ac5578f60eb1d47666f181
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_resize_output_ops.h>
+
+namespace at {
+
+
+// aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!)
+inline const at::Tensor & _resize_output_(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output_::call(self, c10::fromIntArrayRefSlow(size), device);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & _resize_output_(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output_::call(self, c10::fromIntArrayRefSlow(size), device);
+  }
+}
+
+// aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!)
+inline const at::Tensor & _resize_output__symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output_::call(self, size, device);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & _resize_output_(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output_::call(self, size, device);
+  }
+}
+
+// aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & _resize_output_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output_out::call(self, c10::fromIntArrayRefSlow(size), device, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & _resize_output_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output_out::call(self, c10::fromIntArrayRefSlow(size), device, out);
+  }
+}
+
+// aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & _resize_output_outf(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) {
+    return at::_ops::_resize_output_out::call(self, c10::fromIntArrayRefSlow(size), device, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & _resize_output_outf(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) {
+    return at::_ops::_resize_output_out::call(self, c10::fromIntArrayRefSlow(size), device, out);
+  }
+}
+
+// aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & _resize_output_symint_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output_out::call(self, size, device, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & _resize_output_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output_out::call(self, size, device, out);
+  }
+}
+
+// aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & _resize_output_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device, const at::Tensor & out) {
+    return at::_ops::_resize_output_out::call(self, size, device, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & _resize_output_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device, const at::Tensor & out) {
+    return at::_ops::_resize_output_out::call(self, size, device, out);
+  }
+}
+
+// aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor
+inline at::Tensor _resize_output(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output::call(self, c10::fromIntArrayRefSlow(size), device);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _resize_output(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output::call(self, c10::fromIntArrayRefSlow(size), device);
+  }
+}
+
+// aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor
+inline at::Tensor _resize_output_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output::call(self, size, device);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _resize_output(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
+    return at::_ops::_resize_output::call(self, size, device);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..e972b833fcca912b7c549975c981bc19292f424f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h>
+
+namespace at {
+
+
+// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal=false, c10::optional<double> scale=c10::nullopt) {
+    return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf48c5a2923e68114c1ac96801370f75eaaba58e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _test_functorch_fallback_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & _test_functorch_fallback_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa84dce4e155ab6b90486921b9fc0e11b17c7af3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _upsample_nearest_exact2d_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, c10::optional<c10::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<c10::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<c10::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API _upsample_nearest_exact2d_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+};
+
+struct TORCH_API _upsample_nearest_exact2d {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7bb65a27d711ef410200c7db9b481638453d22ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _use_cudnn_ctc_loss {
+  using schema = bool (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_use_cudnn_ctc_loss")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool")
+  static bool call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank);
+};
+
+struct TORCH_API _use_cudnn_ctc_loss_Tensor {
+  using schema = bool (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_use_cudnn_ctc_loss")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool")
+  static bool call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_values_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_values_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa460e7de9ee8159633f4e10091b464b36db561d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_values_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor _values_copy(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c18a5e143f144dad091e90230e93668e662681c3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..42cf40440ed6389404c32ba0acf214e0a2fbe538
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2280786dd6a6351113c8ed71d80fa76063a34b4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_backward.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/affine_grid_generator_backward_ops.h>
+
+namespace at {
+
+
+// aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor
+inline at::Tensor affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator_backward::call(grad, c10::fromIntArrayRefSlow(size), align_corners);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator_backward::call(grad, c10::fromIntArrayRefSlow(size), align_corners);
+  }
+}
+
+// aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor
+inline at::Tensor affine_grid_generator_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor affine_grid_generator_backward(const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/any_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/any_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1c89b9aa8374aeb2c8d6ddaba291c8493ab26ec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/any_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor any(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API at::Tensor & any_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..eaf63befae1df14f90a63bac6e8edbc622b7df07
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor argmax(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & argmax_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & argmax_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_scatter.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_scatter.h
new file mode 100644
index 0000000000000000000000000000000000000000..0bcd01d3f1be2d6c955c5e296027437569ef7401
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_scatter.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/as_strided_scatter_ops.h>
+
+namespace at {
+
+
+// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+inline at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_scatter::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_scatter::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+  }
+}
+
+// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+inline at::Tensor as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
+  }
+}
+
+// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+  }
+}
+
+// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
+    return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
+    return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+  }
+}
+
+// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & as_strided_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
+  }
+}
+
+// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & as_strided_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
+    return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
+    return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b83a42904042d396533d39816ab5cbaf6c04cb60
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5423a5c16e3363fbbac347f3d01f57f3a71e94c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbd582bd3ae4c0f38c4ab3c019d053cf665f61f1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor bincount(const at::Tensor & self, const c10::optional<at::Tensor> & weights={}, int64_t minlength=0);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..81ef8f49f347021549da10b30576e58c7b3d67e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a89327379f4ed7b00354c2f08fd45ab5be5b33f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & cauchy_(at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..91af25b478e5c42da940327473e247b708e93ba3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor cos(const at::Tensor & self); +TORCH_API at::Tensor & cos_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3321f5ddf88fd1f3a6ed219f494907bfa9374c21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_cos_out : public at::meta::structured_cos { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor cos_nested(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/detach_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/detach_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7f877ae458e6b21e96535244ad27d100f6d93cc9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/detach_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor detach(const at::Tensor & self); +TORCH_API at::Tensor & detach_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a15f08555822b803f3b3400e9b0da39ea3247f43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
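// A short sketch of how the cos declarations above are reached from user
// code (values assumed for illustration): the functional form allocates a
// new output, while the in-place form routes to the same structured kernel
// declared in cos_native.h.
#include <ATen/ATen.h>

void cos_detach_sketch() {
  at::Tensor x = at::linspace(0.0, 3.14159, /*steps=*/5);
  at::Tensor y = at::cos(x);  // functional variant
  x.cos_();                   // in-place variant, out == self
  // detach (detach_native.h above) returns a view sharing storage but
  // detached from autograd history.
  at::Tensor d = x.detach();
}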
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor eq(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & eq_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor eq(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & eq_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eye_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eye_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d720fde72796a593add5ddcfeda51ce8dcb532f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eye_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & eye_out(at::Tensor & out, int64_t n); +TORCH_API at::Tensor & eye_outf(int64_t n, at::Tensor & out); +TORCH_API at::Tensor & eye_symint_out(at::Tensor & out, c10::SymInt n); +TORCH_API at::Tensor & eye_symint_outf(c10::SymInt n, at::Tensor & out); +TORCH_API at::Tensor & eye_out(at::Tensor & out, int64_t n, int64_t m); +TORCH_API at::Tensor & eye_outf(int64_t n, int64_t m, at::Tensor & out); +TORCH_API at::Tensor & eye_symint_out(at::Tensor & out, c10::SymInt n, c10::SymInt m); +TORCH_API at::Tensor & eye_symint_outf(c10::SymInt n, c10::SymInt m, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..55e9d94a4a0f5fcaca0349e497297131742a91ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
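// The eq header above declares both overload families (Tensor vs Scalar and
// Tensor vs Tensor) with out/in-place forms, while eye is out-only at this
// dispatch key. A brief sketch, with assumed inputs:
#include <ATen/ATen.h>

void eq_eye_sketch() {
  at::Tensor a = at::arange(4);
  at::Tensor m1 = at::eq(a, 2);              // eq.Scalar overload
  at::Tensor m2 = at::eq(a, at::arange(4));  // eq.Tensor overload
  a.eq_(2);                                  // in-place variant

  at::Tensor out = at::empty({3, 3});
  at::eye_out(out, /*n=*/3);                 // writes into preallocated out
}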
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor fft_rfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+TORCH_API at::Tensor & fft_rfft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_rfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc6c945c0854928cfd497af8a67b8477111aa9c8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/flatten_dense_tensors_ops.h>
+
+namespace at {
+
+
+// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
+inline at::Tensor flatten_dense_tensors(at::TensorList tensors) {
+    return at::_ops::flatten_dense_tensors::call(tensors);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2985370e92e9de01708199c0e842a5fb35ec175
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flip_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
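// A minimal call against the fft_rfft2 declarations above (the input shape
// is an assumption). The result is complex, with the last transformed
// dimension reduced to n/2 + 1 because the input is real:
#include <ATen/ATen.h>

void rfft2_sketch() {
  at::Tensor img = at::randn({8, 16});
  at::Tensor spec = at::fft_rfft2(img, /*s=*/c10::nullopt, /*dim=*/{-2, -1},
                                  /*norm=*/c10::nullopt);
  // spec.sizes() == [8, 9]
}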
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9ee54af05f3c9af5ebd95544f733b4e69c746e83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fliplr { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fliplr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fliplr(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2eaf088f78d2f5ffc8ab1476fb0744f2a420d36b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gelu_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gelu_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) 
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); +}; + +struct TORCH_API gelu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gelu_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..57605e5bbd5ac9cd49d18cdad673f260b2358ff5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gelu_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gelu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::string_view approximate, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate, at::Tensor & out); +}; + +struct TORCH_API gelu_ { + using schema = at::Tensor & (at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gelu_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gelu_(Tensor(a!) 
self, *, str approximate='none') -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, c10::string_view approximate); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::string_view approximate); +}; + +struct TORCH_API gelu { + using schema = at::Tensor (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gelu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gelu(Tensor self, *, str approximate='none') -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::string_view approximate); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish.h new file mode 100644 index 0000000000000000000000000000000000000000..92e43ac528eaa40fc8b8525ca9500a73ed2a1874 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::hardswish_out::call(self, out); +} +// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_out::call(self, out); +} + +// aten::hardswish(Tensor self) -> Tensor +inline at::Tensor hardswish(const at::Tensor & self) { + return at::_ops::hardswish::call(self); +} + +// aten::hardswish_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & hardswish_(at::Tensor & self) { + return at::_ops::hardswish_::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..38b0c66ba237f53279c6efc9faeb71ae75a4afd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
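// The gelu schemas above all carry the keyword-only `approximate` string
// ("none" selects the exact erf form, "tanh" the tanh approximation), and
// hardswish takes no extra arguments. A sketch with assumed inputs:
#include <ATen/ATen.h>

void gelu_hardswish_sketch() {
  at::Tensor x = at::randn({2, 3});
  at::Tensor exact = at::gelu(x);           // approximate="none" (default)
  at::Tensor fast = at::gelu(x, "tanh");    // tanh approximation
  at::Tensor grad = at::gelu_backward(at::ones_like(x), x, "tanh");
  at::Tensor hs = at::hardswish(x);         // x * relu6(x + 3) / 6
}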
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack.h new file mode 100644 index 0000000000000000000000000000000000000000..882e219c6e0983c8c45ab153c9bdcce8af85452a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hstack(Tensor[] tensors) -> Tensor +inline at::Tensor hstack(at::TensorList tensors) { + return at::_ops::hstack::call(tensors); +} + +// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors) { + return at::_ops::hstack_out::call(tensors, out); +} +// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out) { + return at::_ops::hstack_out::call(tensors, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a10c3d3aead9f28e4d8b15b299c58b9ff91c4118 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
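// hardtanh above clamps into [min_val, max_val] (defaults -1 and 1), and
// hstack concatenates along dim 1, falling back to dim 0 for 1-D inputs.
// A sketch with assumed values:
#include <ATen/ATen.h>

void hardtanh_hstack_sketch() {
  at::Tensor x = at::tensor({-3.0f, -0.5f, 0.5f, 3.0f});
  at::Tensor clamped = at::hardtanh(x);         // [-1, -0.5, 0.5, 1]
  at::Tensor joined = at::hstack({x, clamped}); // 1-D inputs: cat on dim 0
}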
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..ee3f22fa3cc8c1521c9faa69395899a97951cee6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor +inline at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self) { + return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr.h new file mode 100644 index 0000000000000000000000000000000000000000..1258cdbf1dac83be6f0bd5b53ab6f7d2f53feb9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::int_repr(Tensor self) -> Tensor +inline at::Tensor int_repr(const at::Tensor & self) { + return at::_ops::int_repr::call(self); +} + +// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::int_repr_out::call(self, out); +} +// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::int_repr_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft.h new file mode 100644 index 0000000000000000000000000000000000000000..97744e4cd48ac0f5494c2c197f01d4663d407519 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? 
length=None, bool return_complex=False) -> Tensor
+inline at::Tensor istft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const c10::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, c10::optional<bool> onesided=c10::nullopt, c10::optional<int64_t> length=c10::nullopt, bool return_complex=false) {
+    return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..27ca501b266713d875588e95f7c3f6875f69eeea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true);
+TORCH_API at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..120f835c20223366de5637d7f85fc4db1acd264d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
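// layer_norm above is a composite op: it normalizes over the trailing
// `normalized_shape` dimensions with optional affine weight and bias. A
// minimal call (shapes assumed for illustration):
#include <ATen/ATen.h>

void layer_norm_sketch() {
  at::Tensor input = at::randn({4, 10, 32});
  at::Tensor out = at::layer_norm(input, /*normalized_shape=*/{32},
                                  /*weight=*/at::ones({32}),
                                  /*bias=*/at::zeros({32}),
                                  /*eps=*/1e-5, /*cudnn_enable=*/true);
}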
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cholesky_ex { + using schema = ::std::tuple (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_cholesky_ex") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)") + static ::std::tuple call(const at::Tensor & self, bool upper, bool check_errors); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors); +}; + +struct TORCH_API linalg_cholesky_ex_L { + using schema = ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_cholesky_ex") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "L") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)") + static ::std::tuple call(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1799fe278abc2de3a1ecd8e37a029cad8cd62e03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_matrix_power { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_power") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_power(Tensor self, int n) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n); +}; + +struct TORCH_API linalg_matrix_power_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_power") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..085c5c9f76013ae71668447cb7834c8a691f1832 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7ed9a0f0c5deee9f6990b053c7610f6e2562dde2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
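// logaddexp above computes log(exp(self) + exp(other)) without overflowing
// intermediate exponentials; the meta-dispatch declarations only infer
// shape/dtype. A sketch with assumed values:
#include <ATen/ATen.h>

void logaddexp_sketch() {
  at::Tensor a = at::log(at::tensor({1.0f, 2.0f}));
  at::Tensor b = at::log(at::tensor({3.0f, 4.0f}));
  at::Tensor s = at::logaddexp(a, b);  // == log([1+3, 2+4]) = log([4, 6])
}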
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor matmul(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..979c887268865659ac2e1833c9711b8cd1794954
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_max_dim : public at::impl::MetaBase {
+
+    template <bool DIM = false>
+    struct TORCH_API precompute_out {
+
+        precompute_out<true> set_dim(int64_t value) {
+            static_assert(DIM == false, "dim already set");
+            precompute_out<true> ret;
+ret.dim = value;
+return ret;
+        }
+
+        int64_t dim;
+    };
+    using meta_return_ty = precompute_out <true>;
+    meta_return_ty meta(const at::Tensor & self, int64_t dim, bool keepdim);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0d8ad619529da0ede5335542a72bb371da65c56
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
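// Every out-variant in these generated headers comes in two spellings: _out
// takes the destination first (convenience order) and _outf takes it last
// (schema order); both forward to the same _ops::*::call. Illustrated with
// the matmul declarations above:
#include <ATen/ATen.h>

void out_variant_sketch() {
  at::Tensor a = at::randn({3, 3});
  at::Tensor dst = at::empty({3, 3});
  at::matmul_out(dst, a, a);   // destination-first form
  at::matmul_outf(a, a, dst);  // schema-order form; identical dispatch
}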
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d.h new file mode 100644 index 0000000000000000000000000000000000000000..283c3cd7752d89a5d6f49b7bc884fa22b82ce7ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), out); +} +namespace symint { + template ::value>> + at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), out); + } +} + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), out); +} +namespace symint { + template ::value>> + at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), out); + } +} + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d_out::call(self, indices, output_size, out); +} +namespace symint { + template ::value>> + at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d_out::call(self, indices, output_size, out); + } +} + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & max_unpool2d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::call(self, indices, output_size, out); +} +namespace symint { + template ::value>> + at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::call(self, indices, output_size, out); + } +} + +// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor +inline at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d::call(self, indices, c10::fromIntArrayRefSlow(output_size)); +} +namespace symint { + template ::value>> + at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d::call(self, indices, c10::fromIntArrayRefSlow(output_size)); + } +} + +// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor +inline at::Tensor max_unpool2d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d::call(self, indices, output_size); +} +namespace symint { + template ::value>> + at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d::call(self, indices, output_size); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..bc84a23cb81139cc840dabe9286a18fdf6d83deb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_maximum : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d3ad7a46a1e6a1e32d4e737a62de0e0ba2c28842 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
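// max_unpool2d above inverts a max-pool using the indices recorded by the
// pooling op; the symint overloads exist for symbolic shape tracing and
// collapse onto the same _ops entry. A round-trip sketch (shapes assumed):
#include <ATen/ATen.h>
#include <tuple>

void unpool_sketch() {
  at::Tensor x = at::randn({1, 1, 4, 4});
  auto [pooled, idx] = at::max_pool2d_with_indices(x, /*kernel_size=*/{2, 2});
  at::Tensor restored = at::max_unpool2d(pooled, idx, /*output_size=*/{4, 4});
}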
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa75c67f2734a9773c180cc84616a3cadc6f9ba3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/output_nr.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/output_nr.h
new file mode 100644
index 0000000000000000000000000000000000000000..24bfdcd8f41758ce8ee437252447d5ac775b723a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/output_nr.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/output_nr_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ea30a2cc8bdd368b36431907c8fa899185ac3d8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API pinverse { + using schema = at::Tensor (const at::Tensor &, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pinverse") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pinverse(Tensor self, float rcond=1e-15) -> Tensor") + static at::Tensor call(const at::Tensor & self, double rcond); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..baae27176dc89777a16c69944b096195dd0aa83b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API row_indices_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::row_indices_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "row_indices_copy(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API row_indices_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::row_indices_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinc.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinc.h new file mode 100644 index 0000000000000000000000000000000000000000..dc4117a43b34d21a0e678dc875f99f89771f6ffb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinc.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sinc(Tensor self) -> Tensor +inline at::Tensor sinc(const at::Tensor & self) { + return at::_ops::sinc::call(self); +} + +// aten::sinc_(Tensor(a!) self) -> Tensor(a!) 
+inline at::Tensor & sinc_(at::Tensor & self) { + return at::_ops::sinc_::call(self); +} + +// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sinc_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::sinc_out::call(self, out); +} +// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sinc_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::sinc_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..835810fee0335b08735257b60afd96a3381f46a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_sinh_out : public at::meta::structured_sinh { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor sinh_sparse(const at::Tensor & self); +TORCH_API at::Tensor & sinh_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sinh_sparse_(at::Tensor & self); +TORCH_API at::Tensor sinh_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & sinh_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sinh_sparse_csr_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..2b217bf39011c62911f97188b501255efa2f141e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor +inline at::Tensor slice_copy(const at::Tensor & self, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) { + return at::_ops::slice_copy_Tensor::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step); +} +namespace symint { + template ::value>> + at::Tensor slice_copy(const at::Tensor & self, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) { + return at::_ops::slice_copy_Tensor::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step); + } +} + +// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor
+inline at::Tensor slice_copy_symint(const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+    return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor slice_copy(const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+    return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
+  }
+}
+
+// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+    return at::_ops::slice_copy_Tensor_out::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+    return at::_ops::slice_copy_Tensor_out::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+  }
+}
+
+// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
+    return at::_ops::slice_copy_Tensor_out::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
+    return at::_ops::slice_copy_Tensor_out::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+  }
+}
+
+// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & slice_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+    return at::_ops::slice_copy_Tensor_out::call(self, dim, start, end, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+    return at::_ops::slice_copy_Tensor_out::call(self, dim, start, end, step, out);
+  }
+}
+
+// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & slice_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, at::Tensor & out) { + return at::_ops::slice_copy_Tensor_out::call(self, dim, start, end, step, out); +} +namespace symint { + template ::value>> + at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, at::Tensor & out) { + return at::_ops::slice_copy_Tensor_out::call(self, dim, start, end, step, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l.h new file mode 100644 index 0000000000000000000000000000000000000000..fc12a5480a9ff6eabf3bc58e01a18338492621af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor +inline at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_laguerre_polynomial_l::call(x, n); +} + +// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor +inline at::Tensor special_laguerre_polynomial_l(const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n); +} + +// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor +inline at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n); +} + +// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_laguerre_polynomial_l_out::call(x, n, out); +} +// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_laguerre_polynomial_l_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_laguerre_polynomial_l_out::call(x, n, out); +} + +// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_laguerre_polynomial_l_x_scalar_out::call(x, n, out); +} +// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_laguerre_polynomial_l_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_laguerre_polynomial_l_x_scalar_out::call(x, n, out); +} + +// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_laguerre_polynomial_l_n_scalar_out::call(x, n, out); +} +// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..33bb94f26294727d5c2b66e655dc36fb45a266a4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_xlog1py {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API special_xlog1py_self_scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Scalar & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other);
+};
+
+struct TORCH_API special_xlog1py_other_scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "other_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API special_xlog1py_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API special_xlog1py_self_scalar_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API special_xlog1py_other_scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_xlog1py")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "other_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
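For orientation: the _ops structs above are the raw dispatcher bindings, and the public at::special_xlog1py wrapper (generated separately in special_xlog1py.h) reduces to the ::call shown here. A sketch, illustrative only (the op computes self * log1p(other)):

#include <ATen/ATen.h>

void xlog1py_demo() {
  at::Tensor a = at::rand({3});
  at::Tensor b = at::rand({3});
  at::Tensor r1 = at::special_xlog1py(a, b);              // public wrapper
  at::Tensor r2 = at::_ops::special_xlog1py::call(a, b);  // equivalent raw dispatcher call
}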
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a7354a2b4db32afea9e4de1c748329f7503ae74d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & sum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
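The at::cpu:: functions bind directly to the kernel registered for the CPU dispatch key and skip dispatch entirely, so they are only valid on CPU tensors. A hedged sketch using only the declarations above plus standard ATen:

#include <ATen/ATen.h>
#include <ATen/ops/sum_cpu_dispatch.h>

void cpu_sum_demo() {
  at::Tensor t = at::rand({4, 5});
  int64_t dims[] = {1};
  // Same result as the dispatching at::sum(t, {1}); dtype stays the input's.
  at::Tensor s = at::cpu::sum(t, at::IntArrayRef(dims), /*keepdim=*/false, /*dtype=*/c10::nullopt);
}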
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API sym_constrain_range {
+  using schema = void (const at::Scalar &, c10::optional<int64_t>, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sym_constrain_range")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> ()")
+  static void call(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max);
+};
+
+}} // namespace at::_ops
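Wait, that body replaces this line's content and the hunk must open with the file header; see the corrected header emitted with the previous hunk. sym_constrain_range returns void: it records runtime bounds for a data-dependent size so the symbolic-shape machinery can assume them, and it throws if the concrete value falls outside the bounds. A sketch, assuming the generated at::sym_constrain_range wrapper (from ATen/ops/sym_constrain_range.h) is present in this build:

#include <ATen/ATen.h>

void constrain_demo() {
  // Assert 0 <= 5 <= 10 and record the range; no value is returned.
  at::sym_constrain_range(at::Scalar(5), /*min=*/0, /*max=*/10);
}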
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6badf5a999fd1313c6a3e9530c6f188778ded0f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..127a5223fb25c64ae11a21e35b6c43eec6a1e3c9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_upsample_bicubic2d_backward : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & grad_output, at::ArrayRef<c10::SymInt> output_size, at::ArrayRef<c10::SymInt> input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+} // namespace meta
+} // namespace at
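unsafe_chunk behaves like chunk but skips some of chunk's autograd-related safety bookkeeping (hence the unsafe_ prefix); the native:: declaration above is the default backend implementation behind the dispatcher. A sketch through the public wrapper, illustrative only:

#include <ATen/ATen.h>
#include <vector>

void chunk_demo() {
  at::Tensor t = at::arange(12);
  std::vector<at::Tensor> parts = at::unsafe_chunk(t, /*chunks=*/3, /*dim=*/0);
  // parts[0] == [0..3], parts[1] == [4..7], parts[2] == [8..11]
}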
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c65c70a11d6248c378901e2540ae04c93b20cbab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor upsample_nearest1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
+TORCH_API at::Tensor & upsample_nearest1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
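The meta:: dispatch functions run shape inference only: on meta-device inputs they produce an output with the right sizes and strides but no storage, which is how compilers and tracers propagate shapes. A sketch, illustrative only:

#include <ATen/ATen.h>
#include <ATen/ops/upsample_nearest1d_meta_dispatch.h>

void meta_shape_demo() {
  at::Tensor m = at::empty({1, 3, 8}, at::TensorOptions().device(at::kMeta));
  at::Tensor out = at::meta::upsample_nearest1d(m, /*output_size=*/{16});
  // out.sizes() == [1, 3, 16]; nothing was allocated or computed
}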
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c45b2ab1b28fd595f4fc1740bd94a29aac78e7e8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & upsample_trilinear3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
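A sketch of the CPU backward entry point above: output_size lists the three spatial dims of the forward output, input_size the full five-dim input shape, and grad_input comes back with the input shape. Illustrative only, using standard ATen factories:

#include <ATen/ATen.h>
#include <ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h>

void trilinear_bwd_demo() {
  at::Tensor grad_out = at::rand({1, 1, 8, 8, 8});  // gradient w.r.t. the upsampled output
  at::Tensor grad_in = at::cpu::upsample_trilinear3d_backward(
      grad_out, /*output_size=*/{8, 8, 8}, /*input_size=*/{1, 1, 4, 4, 4},
      /*align_corners=*/false);
  // grad_in.sizes() == [1, 1, 4, 4, 4]
}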
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7483fe46a0dd1ea45d5f11a63758a09a40a4334c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & upsample_trilinear3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/where_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/where_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..204567ea097a53a4dcdcecad030420508afb11b7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/where_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & where_out(at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & where_outf(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
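Finally, the at::cuda:: where binding requires CUDA tensors and a CUDA-enabled build; portable code should go through the dispatching wrapper, which routes to this kernel when the inputs live on the GPU. A closing sketch, illustrative only:

#include <ATen/ATen.h>

at::Tensor pick_positive(const at::Tensor & a, const at::Tensor & b) {
  // Elementwise select: a where a > 0, b elsewhere. On CUDA inputs the
  // dispatcher lands in the at::cuda::where declared above.
  return at::where(a > 0, a, b);
}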