diff --git a/ckpts/universal/global_step20/zero/16.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/16.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1d4941564ffcae5f53a9e57a647d4f493da9ab53
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/16.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4a0e52e4919b3248e1efc05708546a549ad43e9cdf77b00b5fea9cab97fb4db
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/16.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/16.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3b9fb61f81feb9c5a235571bf0a01e81026e491d
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/16.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45b74e1be0931535877e3bc7f55263afcf83d6f79c0a26aa332791c265c318e8
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..58684ac65aa3b10076f044ecaff42125e4d97b7c
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f74426d11a270d0ed394be6fa518152bd4d64c809f4cbc80b43364ea8e8512b4
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8f2baa75f7b4e668dc20bb7cd1d01e712e37f95f
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3ad016bc256ee26170b92ca14ad34b14abda1050b13fbe150cf8bcd14cd1846
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4ba77168a50b9bbe4b9bfa581b245f103d063506
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:432cbd6f62b7afef9e9d03fe3d3527cc211b29d5f6266c02b7be75989aebed76
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/9.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/9.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fccea1cdecf1047b866d52fd6f84e0d8f9e34fa1
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/9.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04ac372c137fd3040a22ad3f7781a0dfc3c64e4f554d5af5dc58a0657bd2bb29
+size 9293
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..628c62691e07802581d8dd42c59cc851810a94f7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false);
+TORCH_API at::Tensor & _convert_indices_from_csr_to_coo_out(at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false);
+TORCH_API at::Tensor & _convert_indices_from_csr_to_coo_outf(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..68da8d0bf7334f9883c269b9322c67306d7ce4c8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32);
+TORCH_API at::Tensor _convolution_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32);
+TORCH_API at::Tensor & _convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32);
+TORCH_API at::Tensor & _convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out);
+TORCH_API at::Tensor & _convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32);
+TORCH_API at::Tensor & _convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from.h
new file mode 100644
index 0000000000000000000000000000000000000000..609dd43069510f7c72c33c863a83fec6c98ee067
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_copy_from_ops.h>
+
+namespace at {
+
+
+// aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
+inline at::Tensor _copy_from(const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false) {
+    return at::_ops::_copy_from::call(self, dst, non_blocking);
+}
+
+// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _copy_from_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false) {
+    return at::_ops::_copy_from_out::call(self, dst, non_blocking, out);
+}
+// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _copy_from_outf(const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
+    return at::_ops::_copy_from_out::call(self, dst, non_blocking, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b64f1804ede7042c313f091a017dc65adda2083
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_log2_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_log2_slow(at::TensorList self);
+TORCH_API void foreach_tensor_log2_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_log2_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_log2_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_norm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..02b354ba5e2fd5adfcd08ea54fb49fb1fecfa7b7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_norm.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_norm_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_norm(at::TensorList self, const at::Scalar & ord=2) {
+    return at::_ops::_foreach_norm_Scalar::call(self, ord);
+}
+
+// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()
+inline void _foreach_norm_out(at::TensorList out, at::TensorList self, const at::Scalar & ord=2) {
+    return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out);
+}
+// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()
+inline void _foreach_norm_outf(at::TensorList self, const at::Scalar & ord, at::TensorList out) {
+    return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..927633ade2492489ac7834af347c86e4a7d5b450
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _functional_sym_constrain_range_for_size(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max, const at::Tensor & dep_token);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged.h
new file mode 100644
index 0000000000000000000000000000000000000000..8931cde279c606a91db03e8c8549c3f3a38e3061
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_view_from_jagged_ops.h>
+
+namespace at {
+
+
+// aten::_nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor(a)
+inline at::Tensor _nested_view_from_jagged(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths={}, int64_t ragged_idx=1) {
+    return at::_ops::_nested_view_from_jagged::call(self, offsets, dummy, lengths, ragged_idx);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_packed_sequence_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_packed_sequence_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b04f1f42d3849e797a717ede53b9ba0a1103dae7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_packed_sequence_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..be3daf5ff8cdb9bb96a6753d24e0c688f7b54e2d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _rowwise_prune(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..159cd9d6c96d6aeaceee8620d0c64f570289c9c0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1b58af2e3971a63cc4d697ddf89bd0558280478
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced=c10::nullopt);
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced, at::Tensor & out);
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced=c10::nullopt);
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..65943d90ab306062ff4e0fd16809c283a931b4f8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor _sparse_log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8c2078e5a9c44223082be7984efda7d2194c5297
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _test_check_tensor(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsr_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsr_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e9549aa51d2747be887ba7bba1ec101c0ce5b02f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsr_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..00e5a7b1c264ce43bca5e01ecbe88a563d14c8e0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4491fdd247be48c7d52b96254591a22014c558da
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/align_to_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/align_to_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8dbdcbbe1f8884b1d30243fc24783e073fe1827e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/align_to_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API align_to {
+  using schema = at::Tensor (const at::Tensor &, at::DimnameList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::align_to")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, at::DimnameList names);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names);
+};
+
+struct TORCH_API align_to_ellipsis_idx {
+  using schema = at::Tensor (const at::Tensor &, at::DimnameList, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::align_to")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ellipsis_idx")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..04ad1809fc686618f5d1808ee5ec6779e9aaf4e4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor arcsin(const at::Tensor & self);
+TORCH_API at::Tensor & arcsin_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & arcsin_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & arcsin_(at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ecbc814d9efbc3d26757480dd07bef91f5a7ac6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API arctan2 {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctan2")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctan2(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API arctan2_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctan2")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API arctan2_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctan2_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc74d29b879eb8376c06995774289e328f98f5f5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API arctanh {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctanh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctanh(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API arctanh_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctanh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctanh_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API arctanh_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctanh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bae7cf7f363bc59068f9e5be61997c86b2cb04b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API bool can_cast(at::ScalarType from, at::ScalarType to);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cartesian_prod.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cartesian_prod.h
new file mode 100644
index 0000000000000000000000000000000000000000..48f62afcd6d49ede929d02fa499d3a689f4a409b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cartesian_prod.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cartesian_prod_ops.h>
+
+namespace at {
+
+
+// aten::cartesian_prod(Tensor[] tensors) -> Tensor
+inline at::Tensor cartesian_prod(at::TensorList tensors) {
+    return at::_ops::cartesian_prod::call(tensors);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..24d08726256c26db2704846c31e874d91886c8b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor col_indices_copy(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..636b6b28f33123df81e7ee30559c2c4ea03ad999
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API convolution_overrideable {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::SymIntArrayRef, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::convolution_overrideable")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups);
+};
+
+struct TORCH_API convolution_overrideable_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::SymIntArrayRef, c10::SymInt, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::convolution_overrideable")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e1d10c6a8e0e333dccceadcb4c33eafa71d0492
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor cosh(const at::Tensor & self);
+TORCH_API at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & cosh_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..55c538a66fbd3a91f03cd2f66d7597f4c8345327
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cudnn_batch_norm_backward_ops.h>
+
+namespace at {
+
+
+// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
+    return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
+}
+
+// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
+    return at::_ops::cudnn_batch_norm_backward_out::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
+}
+// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_outf(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::cudnn_batch_norm_backward_out::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e9546d2f9deeeb04c0f3720b3667877da89227b8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API detach_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::detach_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "detach_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API detach_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::detach_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5a6c9a0cce2b6a3ac65f1233ad8ecd0d6795cb3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/digamma.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/digamma_ops.h>
+
+namespace at {
+
+
+// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & digamma_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::digamma_out::call(self, out);
+}
+// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & digamma_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::digamma_out::call(self, out);
+}
+
+// aten::digamma(Tensor self) -> Tensor
+inline at::Tensor digamma(const at::Tensor & self) {
+    return at::_ops::digamma::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b611521f414ccc60a04d757e44befb0575ba61d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fmod_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API fmod_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod.Scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API fmod__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API fmod_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API fmod_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod.Tensor(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API fmod__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e0f0f8337346d2ccb6c4efd91defec36e6bf1051
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ff72fcfcbd7399a369e7d286071fb0c4ae7fa89
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
+TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
+TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3be116ba230f4f3b26f27741a436b5a163d6c03d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f89cc73bb04dad56225b164d77689259dab35e1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_softmax_int { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype); +}; + +struct TORCH_API log_softmax_int_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, c10::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype, at::Tensor & out); +}; + +struct TORCH_API log_softmax_Dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ff8b8d60632394a8aee5908179e552ede62f34fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6344076962900da0611b0420b222a3c29a8a215a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & logcumsumexp_out(const at::Tensor & self, int64_t dim, at::Tensor & out); +TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & logcumsumexp_out(const at::Tensor & self, at::Dimname dim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet.h new file mode 100644 index 0000000000000000000000000000000000000000..78ee6728644cec85be91df49a9be916f9415e94a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logdet(Tensor self) -> Tensor +inline at::Tensor logdet(const at::Tensor & self) { + return at::_ops::logdet::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h new file mode 100644 index 
0000000000000000000000000000000000000000..8f79c0d5229b0e0a3892f937aca80b2403778577 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logit(const at::Tensor & self, c10::optional<double> eps=c10::nullopt); +TORCH_API at::Tensor & logit_out(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out); +TORCH_API at::Tensor & logit_(at::Tensor & self, c10::optional<double> eps=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..768b3b802d3d29a4364f556c0fe349b5b99c9d44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API matrix_exp_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::matrix_exp_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "matrix_exp_backward(Tensor self, Tensor grad) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..cdd9b683764ef25f126062b4eef3a0d12bbd6f52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) { + return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); +} + +// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) { + return at::_ops::miopen_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2); +} +// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::miopen_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution.h new file mode 100644 index 0000000000000000000000000000000000000000..e3472f660a718ef147991bdb76c29f11766956fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor +inline at::Tensor miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic); + } +} + +// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor +inline at::Tensor miopen_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } +} + +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & miopen_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } +} + +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & miopen_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, benchmark, deterministic, out); + } +} + +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & miopen_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } +} + +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & miopen_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..b16546905bc69c07e1617ff97543d405f17b91e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) +inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) { + return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); +} + +// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () +inline void miopen_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) { + return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); +} +// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> () +inline void miopen_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { + return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv2d_weight.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv2d_weight.h new file mode 100644 index 0000000000000000000000000000000000000000..c62fe321044dc0f2cb34578278b5bc29dd3e7c04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv2d_weight.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor +inline at::Tensor mkldnn_reorder_conv2d_weight(const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor mkldnn_reorder_conv2d_weight(const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt); + } +} + +// aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? 
input_size=None) -> Tensor +inline at::Tensor mkldnn_reorder_conv2d_weight_symint(const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor mkldnn_reorder_conv2d_weight(const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size); + } +} + +// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mkldnn_reorder_conv2d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & mkldnn_reorder_conv2d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt, out); + } +} + +// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mkldnn_reorder_conv2d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & mkldnn_reorder_conv2d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? 
c10::make_optional(c10::fromIntArrayRefSlow(*input_size)) : c10::nullopt, out); + } +} + +// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mkldnn_reorder_conv2d_weight_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, padding, stride, dilation, groups, input_size, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & mkldnn_reorder_conv2d_weight_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=c10::nullopt) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, padding, stride, dilation, groups, input_size, out); + } +} + +// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mkldnn_reorder_conv2d_weight_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, padding, stride, dilation, groups, input_size, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & mkldnn_reorder_conv2d_weight_outf(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, padding, stride, dilation, groups, input_size, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/movedim.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/movedim.h new file mode 100644 index 0000000000000000000000000000000000000000..c1f92095ed91b1f582144bbb3208a6e2dbbcb092 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/movedim.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) +inline at::Tensor movedim(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) { + return at::_ops::movedim_intlist::call(self, source, destination); +} + +// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) +inline at::Tensor movedim(const at::Tensor & self, int64_t source, int64_t destination) { + return at::_ops::movedim_int::call(self, source, destination); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial_native.h new file mode 100644 index 0000000000000000000000000000000000000000..05a252db2f8a6757d90676596e2ac12aa9962d13 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor multinomial(const at::Tensor & self, int64_t num_samples, bool replacement=false, c10::optional<at::Generator> generator=c10::nullopt); +TORCH_API at::Tensor & multinomial_out(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..aa950ae077158805edd176a52976b6f311364c9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API nll_loss_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, c10::SymInt, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); +}; + +struct TORCH_API nll_loss_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, c10::SymInt, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..003f5504199d476d879abb58e55317a9c07493d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor outer(const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & outer_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & outer_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..71456b97c5c3c871e009f80cf4250402efd76dfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
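The functional, _out, and _outf spellings declared above differ only in where the destination tensor sits in the argument list. A minimal sketch using the outer declarations from this header (tensor contents are assumed for illustration):

    #include <ATen/ATen.h>

    void outer_demo() {
      at::Tensor a = at::arange(1, 4).to(at::kFloat);  // [1, 2, 3]
      at::Tensor b = at::arange(1, 3).to(at::kFloat);  // [1, 2]
      at::Tensor out = at::empty({3, 2});
      at::Tensor m = at::outer(a, b);  // functional form allocates the 3x2 result
      at::outer_out(out, a, b);        // `_out`: destination tensor comes first
      at::outer_outf(a, b, out);       // `_outf`: destination tensor comes last
    }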
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor pixel_shuffle(const at::Tensor & self, int64_t upscale_factor); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6dced6271caf885396711390b8b058c505ecc5e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & quantize_per_channel_out(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out); +TORCH_API at::Tensor quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d.h new file mode 100644 index 0000000000000000000000000000000000000000..13cce5d369e0491b0864e92855701fff2cbdc348 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor +inline at::Tensor quantized_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantized_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool2d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out); +} +// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & quantized_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::quantized_max_pool2d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a7959a1545cca5ce4b912363f2bb60c0dc5600c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & quantized_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API at::Tensor & quantized_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/real_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/real_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1171869e1f0759a2e5712c1b2d5270b959115ec8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/real_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
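real is registered as CompositeImplicitAutograd because for complex inputs it decomposes into a view rather than needing per-backend kernels. A small assumed-usage sketch (sizes are illustrative):

    #include <ATen/ATen.h>

    void real_demo() {
      at::Tensor z = at::randn({2, 2}, at::kComplexFloat);
      // Returns the real components viewed as a float tensor; no data is copied.
      at::Tensor re = at::real(z);
    }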
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor real(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/relu_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/relu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..91f8c9649d8ac0f2e5172c969bf48a6eefcddca3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/relu_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API relu { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "relu(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API relu_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::relu_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "relu_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API relu_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6bf381dfb06d1ab7c233eca11660dbb2ec017b1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm); +TORCH_API at::Tensor & renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d.h new file mode 100644 index 0000000000000000000000000000000000000000..fdeefab11dab80f2f4dfe7143180d85311a1209a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & replication_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & replication_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out); + } +} + +// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & replication_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & replication_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out); + } +} + +// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & replication_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad1d_out::call(self, padding, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & replication_pad1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad1d_out::call(self, padding, out); + } +} + +// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & replication_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad1d_out::call(self, padding, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & replication_pad1d_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad1d_out::call(self, padding, out); + } +} + +// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor +inline at::Tensor replication_pad1d(const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d::call(self, c10::fromIntArrayRefSlow(padding)); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor replication_pad1d(const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d::call(self, c10::fromIntArrayRefSlow(padding)); + } +} + +// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor +inline at::Tensor replication_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad1d::call(self, padding); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor replication_pad1d(const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::replication_pad1d::call(self, padding); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a43cb2d9bf18f98bcfc9068fe76099759e4b232c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
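The at::symint:: aliases generated alongside these functions select between the concrete int64_t overload and the c10::SymInt overload through their enable_if template parameter. A sketch of both spellings for replication_pad1d (input sizes are assumptions):

    #include <ATen/ATen.h>

    void pad_demo() {
      at::Tensor x = at::arange(4).to(at::kFloat).reshape({1, 1, 4});
      // Replicates edge values: 2 columns on the left, 1 on the right -> [1, 1, 7].
      at::Tensor y = at::replication_pad1d(x, {2, 1});
      // Equivalent call through the templated symint alias with concrete ints.
      at::Tensor y2 = at::symint::replication_pad1d<int64_t>(x, {2, 1});
    }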
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & replication_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8312c9651eeac7acafa387dfcc9a516b892ea57b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API rrelu_with_noise_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional<at::Generator>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rrelu_with_noise") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out); +}; + +struct TORCH_API rrelu_with_noise { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional<at::Generator>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rrelu_with_noise") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? 
generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator); +}; + +struct TORCH_API rrelu_with_noise_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rrelu_with_noise_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..ff422f039c813df7d80d45affffe02e19332a435 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor +inline at::Tensor scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self); +} + +// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & scatter_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce_two_out::call(self, dim, index, src, reduce, include_self, out); +} +// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & scatter_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::scatter_reduce_two_out::call(self, dim, index, src, reduce, include_self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_native.h new file mode 100644 index 0000000000000000000000000000000000000000..49fa3d1d59ef131e919900d3016140c89174c9ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & segment_reduce_out(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out); +TORCH_API at::Tensor segment_reduce_kernel(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & indices={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional<at::Scalar> & initial=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d6fec4aecfcb45e2bc1f3d685b78710a1b887fe6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sgn { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sgn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sgn(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API sgn_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sgn_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sgn_(Tensor(a!) 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d6fec4aecfcb45e2bc1f3d685b78710a1b887fe6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API sgn { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sgn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sgn(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API sgn_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sgn_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sgn_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API sgn_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sgn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..944e3349c01b42cf0231df775cb6a16705eb2488 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor silu(const at::Tensor & self); +TORCH_API at::Tensor & silu_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & silu_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & silu_(at::Tensor & self); + +} // namespace meta +} // namespace at
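The at::meta:: overloads declared in these *_meta_dispatch.h headers run only the shape/dtype half of a structured kernel. One way to see that in action is with tensors on the meta device (a sketch, not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // A meta tensor carries metadata but no storage; ops on it dispatch to
  // the Meta backend and propagate shapes without doing any arithmetic.
  at::Tensor x = at::empty({8, 128},
                           at::TensorOptions().device(c10::DeviceType::Meta));
  at::Tensor y = at::silu(x);
  std::cout << y.sizes() << " on " << y.device() << "\n";  // [8, 128] on meta
  return 0;
}
```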
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d00485421d05ab81be405ac7c494af87af30ed7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor sinh(const at::Tensor & self); +TORCH_API at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sinh_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca0ff2956eecef067a2a4ba942cc60425788e8af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out); +TORCH_API at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out); + +} // namespace cpu +} // namespace at
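For the transposed convolution declared above, each spatial output extent is (in - 1)*stride - 2*padding + dilation*(kernel - 1) + output_padding + 1. A small shape check (illustrative sketch, not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor input = at::randn({1, 4, 8, 8});
  at::Tensor weight = at::randn({4, 2, 3, 3});  // (in_ch, out_ch, kH, kW)
  at::Tensor out = at::slow_conv_transpose2d(input, weight,
                                             /*kernel_size=*/{3, 3},
                                             /*bias=*/{}, /*stride=*/{2, 2},
                                             /*padding=*/{1, 1});
  // (8 - 1)*2 - 2*1 + 1*(3 - 1) + 0 + 1 = 15
  std::cout << out.sizes() << "\n";  // [1, 2, 15, 15]
  return 0;
}
```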
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b4824333ef112b5ac7643569e435d406e15172dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold); +TORCH_API at::Tensor & softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold); +TORCH_API at::Tensor & softplus_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..46752d2c17034617d6c46f3767e9a1b47d088d1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor softshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & softshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & softshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + +} // namespace meta
} // namespace at
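softshrink shrinks values toward zero by lambd and zeroes the band [-lambd, lambd]: x - lambd for x > lambd, x + lambd for x < -lambd, 0 otherwise. A quick numeric check (sketch, not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::tensor({-1.0f, -0.2f, 0.0f, 0.3f, 2.0f});
  at::Tensor y = at::softshrink(x, /*lambd=*/0.5);
  std::cout << y << "\n";  // [-0.5, 0, 0, 0, 1.5]
  return 0;
}
```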
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sort_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sort_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e57698176bb7ed3dbb945938636d615ba363b9f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sort_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at
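The sort overload above takes an optional stable flag; a stable sort preserves the relative order of equal elements. Illustrative sketch (not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::tensor({3.0f, 1.0f, 1.0f, 2.0f});
  auto [values, indices] = at::sort(x, /*stable=*/true, /*dim=*/-1,
                                    /*descending=*/false);
  // The two equal 1s keep their original order, so indices = [1, 2, 3, 0].
  std::cout << values << "\n" << indices << "\n";
  return 0;
}
```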
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y0_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y0_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6e83ac250bee459d1d3c8848c3ed451418743a8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y0_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/special_bessel_y0_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_special_bessel_y0_out : public at::meta::structured_special_bessel_y0 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8ca299f357337a07ba43c8eaf7c6d07950ac122c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API special_hermite_polynomial_he { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_he") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor") + static at::Tensor call(const at::Tensor & x, const at::Tensor & n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n); +}; + +struct TORCH_API special_hermite_polynomial_he_x_scalar { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_he") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor") + static at::Tensor call(const at::Scalar & x, const at::Tensor & n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n); +}; + +struct TORCH_API special_hermite_polynomial_he_n_scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_he") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor") + static at::Tensor call(const at::Tensor & x, const at::Scalar & n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n); +}; + +struct TORCH_API special_hermite_polynomial_he_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_he") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out); +}; +
+struct TORCH_API special_hermite_polynomial_he_x_scalar_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_he") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +}; + +struct TORCH_API special_hermite_polynomial_he_n_scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_hermite_polynomial_he") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out); +}; + +}} // namespace at::_ops
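These overloads evaluate the probabilists' Hermite polynomials He_n, defined by He_0(x) = 1, He_1(x) = x, and the recurrence He_{n+1}(x) = x*He_n(x) - n*He_{n-1}(x). A spot check using the n_scalar overload (sketch, not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // He_3(x) = x^3 - 3x, so He_3(2) = 8 - 6 = 2.
  at::Tensor x = at::tensor({2.0});
  std::cout << at::special_hermite_polynomial_he(x, /*n=*/3) << "\n";  // [2]
  return 0;
}
```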
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7146ca6a736b1bd7c5df0541272b1135786ede8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_modified_bessel_i0(const at::Tensor & self); +TORCH_API at::Tensor & special_modified_bessel_i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_modified_bessel_i0_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b93c2707f7ebe639fd7c45929061ee1e5cdf82cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API special_ndtri { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_ndtri") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_ndtri(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_ndtri_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_ndtri") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops
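special_ndtri is the inverse of the standard normal CDF: given a probability p it returns the z with Phi(z) = p. Sketch (not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor p = at::tensor({0.5, 0.975});
  // ndtri(0.5) = 0; ndtri(0.975) ~ 1.96, the familiar two-sided 95% z value.
  std::cout << at::special_ndtri(p) << "\n";
  return 0;
}
```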
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1.h new file mode 100644 index 0000000000000000000000000000000000000000..0ce593a9d8d12d7ac0365d1e8d27146025ce7f9e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/special_scaled_modified_bessel_k1_ops.h> + +namespace at { + + +// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor +inline at::Tensor special_scaled_modified_bessel_k1(const at::Tensor & x) { + return at::_ops::special_scaled_modified_bessel_k1::call(x); +} + +// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_scaled_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & x) { + return at::_ops::special_scaled_modified_bessel_k1_out::call(x, out); +} +// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_scaled_modified_bessel_k1_outf(const at::Tensor & x, at::Tensor & out) { + return at::_ops::special_scaled_modified_bessel_k1_out::call(x, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm.h new file mode 100644 index 0000000000000000000000000000000000000000..02573affffb8a5782817c0dbc43621e20f21ba46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/sspaddmm_ops.h> + +namespace at { + + +// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha); +} + +// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sspaddmm_out::call(self, mat1, mat2, beta, alpha, out); +} +// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sspaddmm_out::call(self, mat1, mat2, beta, alpha, out); +} + +}
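sspaddmm computes beta*self + alpha*(mat1 @ mat2), where self and mat1 are sparse COO tensors and, unlike addmm, the result stays sparse. Sketch (not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor self = at::zeros({2, 3}).to_sparse();
  at::Tensor mat1 = at::eye(2).to_sparse();
  at::Tensor mat2 = at::ones({2, 3});
  at::Tensor out = at::sspaddmm(self, mat1, mat2);  // beta = alpha = 1
  std::cout << out.to_dense() << "\n";  // all ones, since eye(2) @ ones = ones
  return 0;
}
```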
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1ea5b9b93335c3f1b92ac3701f83ac5a2c79942e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor take(const at::Tensor & self, const at::Tensor & index); +TORCH_API at::Tensor & take_out(const at::Tensor & self, const at::Tensor & index, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..63c2733ef14f9a567bbe9f108e2f4cbb8c323bd5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API trapz_x { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trapz") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor") + static at::Tensor call(const at::Tensor & y, const at::Tensor & x, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim); +}; + +struct TORCH_API trapz_dx { + using schema = at::Tensor (const at::Tensor &, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trapz") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dx") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor") + static at::Tensor call(const at::Tensor & y, double dx, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, double dx, int64_t dim); +}; + +}} // namespace at::_ops
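The two trapz overloads integrate y by the trapezoidal rule, either against explicit sample points x or with uniform spacing dx. Sketch (not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // y = x^2 sampled at x = 0, 1, 2; the trapezoid estimate is 3 (exact: 8/3).
  at::Tensor y = at::tensor({0.0, 1.0, 4.0});
  std::cout << at::trapz(y, /*dx=*/1.0, /*dim=*/-1) << "\n";  // 3
  // The .x overload takes explicit, possibly non-uniform, sample points.
  at::Tensor x = at::tensor({0.0, 1.0, 2.0});
  std::cout << at::trapz(y, x, /*dim=*/-1) << "\n";           // 3
  return 0;
}
```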
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9c6ce1d6d9f7ca01aa4b908fda0590e63c28d4ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor unsqueeze(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor unsqueeze_nested(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor unsqueeze_sparse(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor unsqueeze_quantized(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & unsqueeze_(at::Tensor & self, int64_t dim); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b5c6aff8d7028e3070bd3118feb291746d01cfc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); +TORCH_API at::Tensor upsample_nearest2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt); + +} // namespace compositeexplicitautogradnonfunctional
} // namespace at
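upsample_nearest2d in the compositeexplicitautogradnonfunctional namespace is the out-of-place functional form; at the public API level it resizes by nearest-neighbour lookup. A closing sketch (not part of the diff):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::arange(4, at::kFloat).reshape({1, 1, 2, 2});
  at::Tensor y = at::upsample_nearest2d(x, /*output_size=*/{4, 4});
  std::cout << y.sizes() << "\n";  // [1, 1, 4, 4]; each source pixel repeats 2x2
  return 0;
}
```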