diff --git a/ckpts/universal/global_step20/zero/13.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/13.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..31efc4a023a313f7121efcd8ab6846cefd68eb1d
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/13.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1b700716d9bf22f3201a39e4e33a5099557ad4b4d384840cc52983748617720
+size 16778411
diff --git a/ckpts/universal/global_step20/zero/13.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/13.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5c7edd45edb4b5935b57250651e282349facb270
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/13.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65fcbbee9265268864f47afe33e9ac94b1c941ac82da6a12065cad89bb7b79c2
+size 16778317
diff --git a/ckpts/universal/global_step20/zero/4.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/4.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a3d888d20fa0cac15a961c1352ef6087c01f47f7
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/4.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6195377c894d5852ae678b05472c5b45022951957df9c31c84814787fd4947c6
+size 9293
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a4c33cb1969a86a38963763fddee47e09dcdbd8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1caa0989425a2a374613794d0044f6edbbb60c29
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cdist_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cdist_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+};
+
+struct TORCH_API _cdist_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cdist_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
+};
+
+}} // namespace at::_ops
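[Editor's note, not part of the diff] Each generated *_ops.h header defines one struct per operator overload, with static call and redispatch entry points; user code normally reaches these through the public at:: wrappers, which forward to call. A minimal usage sketch, assuming only that libtorch's headers are on the include path:

    #include <ATen/ATen.h>

    // Backward of cdist through the public wrapper; under the hood this
    // forwards to at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist).
    at::Tensor cdist_grad(const at::Tensor& grad, const at::Tensor& x1,
                          const at::Tensor& x2, const at::Tensor& cdist) {
      return at::_cdist_backward(grad, x1, x2, /*p=*/2.0, cdist);
    }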
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_init_dropout_state_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_init_dropout_state_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0676b67d507be803428500b0483daf0466d5ff37
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_init_dropout_state_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _cudnn_init_dropout_state_out(double dropout, bool train, int64_t dropout_seed, at::Tensor & out);
+TORCH_API at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..795f945baba7c0a86aeca1e4a47f4e5b58eec928
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cudnn_rnn_backward_ops.h>
+
+namespace at {
+
+
+// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask);
+  }
+}
+
+// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
+  }
+}
+
+// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+inline void _cudnn_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  void _cudnn_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
+  }
+}
+
+// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+inline void _cudnn_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
+    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  void _cudnn_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
+    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
+  }
+}
+
+// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+inline void _cudnn_rnn_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  void _cudnn_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+  }
+}
+
+// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+inline void _cudnn_rnn_backward_symint_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
+    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  void _cudnn_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
+    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+  }
+}
+
+}
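[Editor's note, not part of the diff] The six overloads above differ only in how sizes are passed (int64_t vs c10::SymInt, converted with c10::fromIntArrayRefSlow) and in where the out tensors appear. A minimal sketch of the output_mask convention, under the assumption (from the schema) that the four entries correspond to the four returned gradients:

    #include <array>

    // output_mask selects which of (grad_input, grad_hx, grad_cx, grad_weights)
    // the backward kernel materializes; autograd derives it from which forward
    // inputs require grad, and a false entry typically yields an undefined Tensor.
    std::array<bool, 4> output_mask = {true, true, false, true};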
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dac223687c607b761ff09e7671d3cc937dee3690
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward);
+TORCH_API at::Tensor _fft_c2c_symint(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward);
+TORCH_API at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward);
+TORCH_API at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out);
+TORCH_API at::Tensor & _fft_c2c_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward);
+TORCH_API at::Tensor & _fft_c2c_symint_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..16d1eaa33130d680e5770eccd8ed7fc6d160e8cc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_add_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+TORCH_API void _foreach_add_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1);
+TORCH_API void _foreach_add_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+TORCH_API void _foreach_add_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_add_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+TORCH_API void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API void _foreach_add_outf(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil.h
new file mode 100644
index 0000000000000000000000000000000000000000..141968c355806bfa45dbfc7beabd1657d4f756ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_ceil_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_ceil(at::TensorList self) {
+    return at::_ops::_foreach_ceil::call(self);
+}
+
+// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
+inline void _foreach_ceil_(at::TensorList self) {
+    return at::_ops::_foreach_ceil_::call(self);
+}
+
+// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_ceil_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_ceil_out::call(self, out);
+}
+// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_ceil_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_ceil_out::call(self, out);
+}
+
+}
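[Editor's note, not part of the diff] The _foreach_* family applies one elementwise op across a whole list of tensors in a single dispatcher round trip, which is how optimizers batch their parameter updates. A minimal usage sketch, assuming only libtorch:

    #include <ATen/ATen.h>
    #include <vector>

    void foreach_demo() {
      std::vector<at::Tensor> xs = {at::rand({2, 3}), at::rand({4})};
      // Functional form: returns one new tensor per input.
      std::vector<at::Tensor> ys = at::_foreach_ceil(xs);
      // In-place form: mutates every tensor in the list.
      at::_foreach_ceil_(xs);
    }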
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cosh_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cosh_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdf4dcdc4e5c8cbcacf983a6f9b6d3139271da01
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cosh_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_cosh(at::TensorList self);
+TORCH_API void _foreach_cosh_(at::TensorList self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_async_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_async_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ee93d199e9b80d27161542a797d869e3a6c6e02
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_async_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _functional_assert_async_msg_cpu(const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3c813aa7e4c033dafb925515cce8b64f11a8afc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _histogramdd_from_bin_tensors {
+  using schema = at::Tensor (const at::Tensor &, at::TensorList, const c10::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_histogramdd_from_bin_tensors")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density);
+};
+
+struct TORCH_API _histogramdd_from_bin_tensors_out {
+  using schema = at::Tensor & (const at::Tensor &, at::TensorList, const c10::optional<at::Tensor> &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_histogramdd_from_bin_tensors")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5565b6f744900f94fee0b8196d114e3145a7a3e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_index_put_impl_ops.h>
+
+namespace at {
+
+
+// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
+inline at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
+    return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe);
+}
+
+// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _index_put_impl_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
+    return at::_ops::_index_put_impl_out::call(self, indices, values, accumulate, unsafe, out);
+}
+// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _index_put_impl_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
+    return at::_ops::_index_put_impl_out::call(self, indices, values, accumulate, unsafe, out);
+}
+
+// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
+inline at::Tensor _index_put_impl(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
+    return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
+}
+
+}
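[Editor's note, not part of the diff] The Tensor?[] indices argument is carried as a c10::List of optional tensors, where a disengaged entry means "all of this dimension". A minimal sketch calling the in-place variant declared above, assuming only libtorch:

    #include <ATen/ATen.h>

    void index_put_demo() {
      at::Tensor self = at::zeros({4});
      at::Tensor values = at::ones({2});
      c10::List<c10::optional<at::Tensor>> indices;
      indices.push_back(at::arange(2) * 2);  // positions 0 and 2
      // accumulate=true adds into existing entries instead of overwriting them.
      at::_index_put_impl_(self, indices, values, /*accumulate=*/true, /*unsafe=*/false);
    }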
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1bd3e5d06d8ef622644e261b6fc91f18d3f338f1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _make_dual_copy_out(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out);
+TORCH_API at::Tensor _make_dual_copy(const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9c0d7b840e402186378c89844a5871fe695d28d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _neg_view_copy_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & _neg_view_copy_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pin_memory_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pin_memory_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1aa883f6682ccf181a8dbe048f25888c38782a8b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pin_memory_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _pin_memory_out(const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out);
+TORCH_API at::Tensor _pin_memory_cuda(const at::Tensor & self, c10::optional<at::Device> device=c10::nullopt);
+TORCH_API at::Tensor _pin_memory_nested(const at::Tensor & self, c10::optional<at::Device> device=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..04f029c77f8af446a192e6ca0da2cd652fcfff6b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _saturate_weight_to_fp16(const at::Tensor & weight);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..718becef2c047f28f3b683665dbdf06cd33b748e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _sparse_sum(const at::Tensor & self);
+TORCH_API at::Tensor _sparse_sum(const at::Tensor & self, at::ScalarType dtype);
+TORCH_API at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma.h
new file mode 100644
index 0000000000000000000000000000000000000000..01ced1d19f99d8e761938c9cd17f9b3a1e399ebb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_standard_gamma_ops.h>
+
+namespace at {
+
+
+// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
+inline at::Tensor _standard_gamma(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::_standard_gamma::call(self, generator);
+}
+
+// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _standard_gamma_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::_standard_gamma_out::call(self, generator, out);
+}
+// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _standard_gamma_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::_standard_gamma_out::call(self, generator, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2078cc72a196c2cd09aeeae1da429b987263803e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _test_optional_floatlist {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_optional_floatlist")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor")
+  static at::Tensor call(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends);
+};
+
+struct TORCH_API _test_optional_floatlist_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::optional<at::ArrayRef<double>>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_optional_floatlist")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csr_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csr_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d92b4dd06aa6537bd33b4b805f49ff11c15460da
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csr_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
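[Editor's note, not part of the diff] Throughout these headers the out-variant naming is uniform: the `_out` form takes the destination tensor first, while the `f`-suffixed `_outf` form takes the schema's argument order with the destination last; both forward to the same _ops entry point. A minimal sketch using the _standard_gamma declarations above, assuming only libtorch:

    #include <ATen/ATen.h>

    void gamma_demo() {
      at::Tensor alpha = at::rand({3}) + 0.5;            // concentration parameters
      at::Tensor out = at::empty_like(alpha);
      at::_standard_gamma_out(out, alpha);               // `_out`: out comes first
      at::_standard_gamma_outf(alpha, c10::nullopt, out);  // `_outf`: out comes last
    }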
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique.h
new file mode 100644
index 0000000000000000000000000000000000000000..8eedeeb9fddb7c2c7f4a67cabfa4127adfb69069
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unique.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_unique_ops.h>
+
+namespace at {
+
+
+// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _unique(const at::Tensor & self, bool sorted=true, bool return_inverse=false) {
+    return at::_ops::_unique::call(self, sorted, return_inverse);
+}
+
+// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _unique_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, bool sorted=true, bool return_inverse=false) {
+    return at::_ops::_unique_out::call(self, sorted, return_inverse, out0, out1);
+}
+// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _unique_outf(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::_unique_out::call(self, sorted, return_inverse, out0, out1);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f2a8d8a9cc825541d8b77d8cf7fa17ac0d4c37a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_upsample_bicubic2d_aa_backward_ops.h>
+
+namespace at {
+
+
+// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
+  }
+}
+
+// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..51b61dda1b5d60c9fcba96ec544ed7d1be16c649
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _weight_norm_differentiable_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_weight_norm_differentiable_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim);
+};
+
+}} // namespace at::_ops
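[Editor's note, not part of the diff] Functional ops in this group that return several tensors, such as at::_unique above, come back as ::std::tuple values. A minimal unpacking sketch, assuming only libtorch:

    #include <ATen/ATen.h>
    #include <tuple>

    void unique_demo() {
      at::Tensor x = at::randint(0, 3, {10});
      at::Tensor values, inverse;
      std::tie(values, inverse) =
          at::_unique(x, /*sorted=*/true, /*return_inverse=*/true);
    }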
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::vector align_tensors(at::TensorList tensors); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..453b8d4031aed73fad2e503ef9edec8a9ce39a4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/amin_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor amin(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false); +TORCH_API at::Tensor & amin_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false); +TORCH_API at::Tensor & amin_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8600211a8c3bfdc8df17740bfd4ea54d6b490e86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor argmax(const at::Tensor & self, c10::optional dim=c10::nullopt, bool keepdim=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_native.h new file mode 100644 index 0000000000000000000000000000000000000000..39e8d5de2af83f2b53b1fda11242515d7b471cce --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_atan_out : public at::meta::structured_atan { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor atan_sparse(const at::Tensor & self); +TORCH_API at::Tensor & atan_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & atan_sparse_(at::Tensor & self); +TORCH_API at::Tensor atan_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & atan_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & atan_sparse_csr_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atanh.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atanh.h new file mode 100644 index 0000000000000000000000000000000000000000..ef8f623d81ced109acccd04e7f1420cdbff3ad4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atanh.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::atanh(Tensor self) -> Tensor +inline at::Tensor atanh(const at::Tensor & self) { + return at::_ops::atanh::call(self); +} + +// aten::atanh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & atanh_(at::Tensor & self) { + return at::_ops::atanh_::call(self); +} + +// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::atanh_out::call(self, out); +} +// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::atanh_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a27b61d73c6b559859f36ca867b21f6f23b06127 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized.h new file mode 100644 index 0000000000000000000000000000000000000000..a2c9bfefe21819f3231838b331f4b0a5dce16f22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor) +inline ::std::tuple choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) { + return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chunk_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chunk_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ff633253eadc3e75df3b169c63acdc397f976a92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/chunk_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::vector chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0); +TORCH_API ::std::vector chunk_nested_tensor(const at::Tensor & self, int64_t chunks, int64_t dim=0); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col2im.h 
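// A minimal usage sketch of the paired out-variant wrappers these Function.h
// headers generate (see the atanh declarations above); illustrative only, not
// part of the generated header. Values stay inside atanh's (-1, 1) domain.
#include <ATen/ATen.h>
static void atanh_out_variants_example() {
  at::Tensor x = at::rand({3});   // values in [0, 1)
  at::Tensor y = at::empty_like(x);
  at::atanh_out(y, x);            // out-parameter-first overload
  at::atanh_outf(x, y);           // functional argument order, out last
  // Both wrappers forward to the same at::_ops::atanh_out::call entry point.
}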
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col2im.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col2im.h
new file mode 100644
index 0000000000000000000000000000000000000000..1487ae16e964103da6b5d79f46c31333b79415da
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col2im.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/col2im_ops.h>
+
+namespace at {
+
+
+// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+    return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+    return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
+  }
+}
+
+// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
+    return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
+    return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
+  }
+}
+
+// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+    return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+    return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
+  }
+}
+
+// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
+    return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & col2im_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
+    return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
+  }
+}
+
+// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
+inline at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+    return at::_ops::col2im::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+    return at::_ops::col2im::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride);
+  }
+}
+
+// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
+inline at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+    return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor col2im(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+    return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..45be9d7513b5bb8956986f05c2fb9fa94f25b563
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
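// A small usage sketch for the col2im entry points declared above; illustrative
// only. Shapes follow the documented aten::col2im contract: the input is
// (N, C * kH * kW, L) and the result is (N, C, output_size[0], output_size[1]).
#include <ATen/ATen.h>
static at::Tensor col2im_example() {
  // C=2 with a 3x3 kernel gives 2*3*3 = 18 rows; a 4x4 output with stride 1,
  // no padding, and dilation 1 yields L = (4-3+1)*(4-3+1) = 4 columns.
  at::Tensor cols = at::rand({1, 18, 4});
  return at::col2im(cols, /*output_size=*/{4, 4}, /*kernel_size=*/{3, 3},
                    /*dilation=*/{1, 1}, /*padding=*/{0, 0}, /*stride=*/{1, 1});
}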
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API col2im_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::col2im") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +}; + +struct TORCH_API col2im { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::col2im") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concat.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concat.h new file mode 100644 index 0000000000000000000000000000000000000000..da4ba4f8bfcec4bf90504e744b2d7674b09b7ee6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concat.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::concat(Tensor[] tensors, int dim=0) -> Tensor +inline at::Tensor concat(at::TensorList tensors, int64_t dim=0) { + return at::_ops::concat::call(tensors, dim); +} + +// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concat_out::call(tensors, dim, out); +} +// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & concat_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::concat_out::call(tensors, dim, out); +} + +// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor +inline at::Tensor concat(at::TensorList tensors, at::Dimname dim) { + return at::_ops::concat_names::call(tensors, dim); +} + +// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concat_names_out::call(tensors, dim, out); +} +// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::concat_names_out::call(tensors, dim, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concat_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concat_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2dcdbbd066d8ec8a4c3c5e3a5e4474079d4d24cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concat_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API concat { + using schema = at::Tensor (at::TensorList, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::concat") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "concat(Tensor[] tensors, int dim=0) -> Tensor") + static at::Tensor call(at::TensorList tensors, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim); +}; + +struct TORCH_API concat_out { + using schema = at::Tensor & (at::TensorList, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::concat") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(at::TensorList tensors, int64_t dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out); +}; + +struct TORCH_API concat_names { + using schema = at::Tensor (at::TensorList, at::Dimname); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::concat") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "concat.names(Tensor[] tensors, Dimname dim) -> Tensor") + static at::Tensor call(at::TensorList tensors, at::Dimname dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim); +}; + +struct TORCH_API concat_names_out { + using schema = at::Tensor & (at::TensorList, at::Dimname, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::concat") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(at::TensorList tensors, at::Dimname dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9ba705902773db5c52c07fced97750c5afc8277b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
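// A brief usage sketch for the concat overloads registered above; illustrative
// only (aten::concat is the schema-level alias of aten::cat):
#include <ATen/ATen.h>
static at::Tensor concat_example() {
  at::Tensor a = at::ones({2, 3});
  at::Tensor b = at::zeros({2, 3});
  return at::concat({a, b}, /*dim=*/0);  // result shape: (4, 3)
}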
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1);
+TORCH_API at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
+TORCH_API at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1);
+TORCH_API at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
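// A minimal sketch of the composite conv3d entry point declared above;
// illustrative only (shapes are arbitrary but self-consistent):
#include <ATen/ATen.h>
static at::Tensor conv3d_example() {
  at::Tensor input  = at::rand({1, 3, 8, 8, 8});  // (N, C_in, D, H, W)
  at::Tensor weight = at::rand({4, 3, 3, 3, 3});  // (C_out, C_in/groups, kD, kH, kW)
  return at::conv3d(input, weight, /*bias=*/{}, /*stride=*/1,
                    /*padding=*/0, /*dilation=*/1, /*groups=*/1);
}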
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..70205d4480ae29519b59613c218c8bbae697ed4f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor conv_transpose1d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1));
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ctc_loss_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ctc_loss_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6870f6a52f111f1135886c433e55c52c58450119
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ctc_loss_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false);
+TORCH_API at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff901f9083b3fbf9e7da4319bd1b169d37b0906e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & cudnn_convolution_relu_out_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out);
+TORCH_API at::Tensor cudnn_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups);
+} // namespace native
+} // namespace at
diff --git 
a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_scatter_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_scatter_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9cf848832b94612a4882b7205ce03aa12e0009d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_scatter_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & diagonal_scatter_out(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out); +TORCH_API at::Tensor diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5f42212b202cc466ef89091ad1bdac4576e59a4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/eq_native.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_eq_Scalar_out : public at::meta::structured_eq_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor eq_scalar_nested(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor eq_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & eq_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_eq_Tensor_out : public at::meta::structured_eq_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor eq_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & eq_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h new file mode 100644 index 0000000000000000000000000000000000000000..82694cf0104d38bc7d814a28605b4b50d133cec0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fbgemm_linear_fp16_weight_fp32_activation(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_ops.h 
b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c671663364f991bde0b721a0d43ed4d3934910ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fbgemm_linear_fp16_weight_fp32_activation { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_linear_fp16_weight_fp32_activation") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor") + static at::Tensor call(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftn.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftn.h new file mode 100644 index 0000000000000000000000000000000000000000..2d236a3157004e4b1a100e010c676d5ca18947f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftn.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +inline at::Tensor fft_rfftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_rfftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } +} + +// aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor +inline at::Tensor fft_rfftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn::call(self, s, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_rfftn(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn::call(self, s, dim, norm); + } +} + +// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfftn_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfftn_out::call(self, s, dim, norm, out); + } +} + +// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & fft_rfftn_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfftn_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfftn_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfftn_out::call(self, s, dim, norm, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill.h new file mode 100644 index 0000000000000000000000000000000000000000..2aad5b3b1789d6ccb9948ab4f411378d44983682 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill.h @@ -0,0 +1,63 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor +inline at::Tensor fill(const at::Tensor & self, const at::Scalar & value) { + return at::_ops::fill_Scalar::call(self, value); +} + +// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor +inline at::Tensor fill(const at::Tensor & self, const at::Tensor & value) { + return at::_ops::fill_Tensor::call(self, value); +} + +// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) +inline at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) { + return at::_ops::fill__Scalar::call(self, value); +} + +// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) +inline at::Tensor & fill_(at::Tensor & self, const at::Tensor & value) { + return at::_ops::fill__Tensor::call(self, value); +} + +// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fill_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & value) { + return at::_ops::fill_Scalar_out::call(self, value, out); +} +// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fill_outf(const at::Tensor & self, const at::Scalar & value, at::Tensor & out) { + return at::_ops::fill_Scalar_out::call(self, value, out); +} + +// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & value) { + return at::_ops::fill_Tensor_out::call(self, value, out); +} +// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fill_outf(const at::Tensor & self, const at::Tensor & value, at::Tensor & out) { + return at::_ops::fill_Tensor_out::call(self, value, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0fc3ba5f3cda51fbbd9a088f2d83c2ce3a11ec4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API from_file {
+  using schema = at::Tensor (c10::string_view, c10::optional<bool>, c10::optional<int64_t>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::from_file")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API from_file_out {
+  using schema = at::Tensor & (c10::string_view, c10::optional<bool>, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::from_file")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler.h
new file mode 100644
index 0000000000000000000000000000000000000000..0309d3f821a7d79ef361a514edaa13c68c792e8e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/grid_sampler_ops.h>
+
+namespace at {
+
+
+// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
+inline at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+    return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners);
+}
+
+}
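// A short usage sketch for aten::grid_sampler as declared above; illustrative
// only. The integer mode arguments follow the GridSampler enums
// (interpolation: 0 = bilinear; padding: 0 = zeros):
#include <ATen/ATen.h>
static at::Tensor grid_sampler_example() {
  at::Tensor input = at::rand({1, 1, 5, 5});   // (N, C, H_in, W_in)
  at::Tensor grid  = at::zeros({1, 3, 3, 2});  // (N, H_out, W_out, 2), coords in [-1, 1]
  return at::grid_sampler(input, grid, /*interpolation_mode=*/0,
                          /*padding_mode=*/0, /*align_corners=*/false);
}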
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c49ae5ee0ef191335930df45ac5822ded668d0f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API hspmm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hspmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out);
+};
+
+struct TORCH_API hspmm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hspmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hspmm(Tensor mat1, Tensor mat2) -> Tensor")
+  static at::Tensor call(const at::Tensor & mat1, const at::Tensor & mat2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..036ec5cf8388536fa56c0f19ef6517cb5fdcc8d6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_i0 : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dfdf6af5fe56b6775385ac8417045acadbd0308f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a85c98e3663dca80dbb0a09dda38f1210359b424
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from 
DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1728013043da66604f808b08dc921f5ca5afc218 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
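// A small sketch of the CPU-dispatched linalg_ldl_solve declared above;
// illustrative only (the factorization comes from the companion
// aten::linalg_ldl_factor op):
#include <ATen/ATen.h>
static at::Tensor ldl_solve_example() {
  at::Tensor A = at::rand({3, 3});
  A = A + A.transpose(-2, -1) + 3 * at::eye(3);  // symmetric, well-conditioned
  at::Tensor B = at::rand({3, 2});
  auto [LD, pivots] = at::linalg_ldl_factor(A);  // hermitian=false by default
  return at::linalg_ldl_solve(LD, pivots, B);    // solves A X = B
}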
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linear_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linear_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..789ce446ff1e1dc3381fc18e402f9f950a792a08
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linear_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={});
+TORCH_API at::Tensor & linear_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out);
+TORCH_API at::Tensor nested_linear(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={});
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d916fbe9dd97906ae3ead4e62cbe75691a03f34
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor logdet(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ee8178d952a2e0e027d12c27368194b465b7d66
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
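// A compact usage sketch of linalg_lu; illustrative only. The at::meta::
// declarations above are the shape-inference ("meta") kernels behind this op;
// the public at::linalg_lu entry point routes to a real backend:
#include <ATen/ATen.h>
static void linalg_lu_example() {
  at::Tensor A = at::rand({4, 4});
  auto [P, L, U] = at::linalg_lu(A, /*pivot=*/true);
  at::Tensor reconstructed = at::matmul(P, at::matmul(L, U));  // ~A up to fp error
}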
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..52225f1698ab5f577980a103947dc9577c883cc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..773c588deda076c45abfaefd08924c8fd3525ac7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple max(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self); +TORCH_API at::Tensor & max_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6ea1ec241ee1d96df0745a5c868a2f1c07c264d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h new file mode 100644 index 0000000000000000000000000000000000000000..f60c6e49067eb6e57f734ff40e5976123e7d5bef --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::meshgrid(Tensor[] tensors) -> Tensor[] +inline ::std::vector meshgrid(at::TensorList tensors) { + return at::_ops::meshgrid::call(tensors); +} + +// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] +inline ::std::vector meshgrid(at::TensorList tensors, c10::string_view indexing) { + return at::_ops::meshgrid_indexing::call(tensors, indexing); +} + +} diff --git 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_transpose_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_transpose_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2de30de84f78af4d86347ff90e7030400d26f9a3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_transpose_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & miopen_convolution_transpose_out_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out);
+TORCH_API at::Tensor miopen_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a2547a81a45c0db4852ca30b5e8194c45eebd59
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
+TORCH_API at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out);
+TORCH_API at::Tensor & miopen_depthwise_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic);
+TORCH_API at::Tensor & miopen_depthwise_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..3eb4b0f86cef4b693e7921662172e3dde314a55d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/nested_to_padded_tensor_ops.h>
+
+namespace at {
+
+
+// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
+inline at::Tensor nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) {
+    return at::_ops::nested_to_padded_tensor::call(self, padding, output_size);
+}
+
+}
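nested_to_padded_tensor converts a nested (ragged) tensor into a dense one, filling the tail of short rows with the given padding value. A sketch, assuming this torch build exposes at::_nested_tensor_from_tensor_list as the nested-tensor constructor (used here only to obtain an input):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // Two ragged rows packed into one nested tensor (assumed constructor op).
      at::Tensor nt = at::_nested_tensor_from_tensor_list(
          {at::ones({2}), at::ones({4})});
      // Pad with 0.0 up to the longest row, producing a dense [2, 4] tensor.
      at::Tensor padded = at::nested_to_padded_tensor(nt, /*padding=*/0.0);
      std::cout << padded << "\n";
      return 0;
    }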
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b3190e6074895d2a6737d98df5535129dfba886
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow_meta_dispatch.h
@@ -0,0 +1,33 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
+TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent);
+TORCH_API at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
+TORCH_API at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent);
+TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent);
+TORCH_API at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
+TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Scalar & exponent);
+
+} // namespace meta
+} // namespace at
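The meta namespace declarations mirror the three public pow overloads (Tensor^Tensor, Scalar^Tensor, Tensor^Scalar); a meta kernel computes only the output's shape and dtype, never its values. The overloads themselves are exercised like this (assumes a linked libtorch build):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor t = at::arange(1, 5).to(at::kFloat);
      at::Tensor a = at::pow(t, 2);  // Tensor ^ Scalar
      at::Tensor b = at::pow(t, t);  // Tensor ^ Tensor, elementwise
      at::Tensor c = at::pow(2, t);  // Scalar ^ Tensor
      t.pow_(2);                     // in-place, matches the pow_ declaration
      std::cout << a << b << c << t << "\n";
      return 0;
    }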
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b4679bf6ab1bb9884483e93d5b4cdead0e37ef1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step=1, at::TensorOptions options={});
+TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={});
+TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor & range_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end);
+TORCH_API at::Tensor & range_outf(const at::Scalar & start, const at::Scalar & end, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
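Unlike arange, range is endpoint-inclusive; that difference is why the Python-level torch.range has long been deprecated in favor of arange. A sketch contrasting the two (assumes a linked libtorch build):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // Endpoint-inclusive: 0.00, 0.25, 0.50, 0.75, 1.00 (five elements).
      at::Tensor r = at::range(0, 1, 0.25, at::TensorOptions().dtype(at::kFloat));
      // arange stops before the endpoint: 0, 1, 2, 3, 4.
      at::Tensor a = at::arange(0, 5);
      std::cout << r << a << "\n";
      return 0;
    }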
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..de0ca02bfe9d4c8f27cda4240736f640a637d46d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API replication_pad1d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::replication_pad1d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+};
+
+struct TORCH_API replication_pad1d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::replication_pad1d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a59b5e069066c21a0bb96a3b738694fc5e35c382
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & requires_grad_(at::Tensor & self, bool requires_grad=true);
+} // namespace native
+} // namespace at
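requires_grad_ only mutates tensor metadata, flipping the autograd-tracking flag in place. A minimal sketch via the Tensor method it backs (assumes a linked libtorch build):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor w = at::ones({2, 2});
      w.requires_grad_(true);  // in-place flag flip; returns the same tensor
      std::cout << std::boolalpha << w.requires_grad() << "\n";  // true
      return 0;
    }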
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1f4c6cd0a813945c3398ed15de5587bea285bff0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor rrelu(const at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & rrelu_(at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h
new file mode 100644
index 0000000000000000000000000000000000000000..e76c6a393968c2c194c1a7380b5050d2b500cac5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/searchsorted.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/searchsorted_ops.h>
+
+namespace at {
+
+
+// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+inline at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
+}
+
+// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Tensor_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
+    return at::_ops::searchsorted_Tensor_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+
+// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+inline at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter);
+}
+
+// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+    return at::_ops::searchsorted_Scalar_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
+    return at::_ops::searchsorted_Scalar_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
+}
+
+}
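searchsorted returns insertion indices that keep sorted_sequence sorted; right=true asks for the rightmost valid slot. A sketch of the Tensor and Scalar overloads declared above (assumes a linked libtorch build):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // sorted_sequence must already be sorted along its last dimension.
      at::Tensor seq = at::arange(0, 10, 2).to(at::kFloat);  // 0 2 4 6 8
      at::Tensor vals = at::arange(3, 8).to(at::kFloat);     // 3 4 5 6 7
      at::Tensor idx = at::searchsorted(seq, vals);          // leftmost slots
      // Scalar overload: first index whose element is strictly greater than 4.
      at::Tensor one = at::searchsorted(seq, 4.0, /*out_int32=*/false, /*right=*/true);
      std::cout << idx << one << "\n";
      return 0;
    }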
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/smm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/smm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f734516fc449a327784cc677c8e64f39ef13744
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/smm_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API smm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::smm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "smm(Tensor self, Tensor mat2) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & mat2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..7436f1a636f3ae9fb7cb7979b1bf278b38260d6b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_softplus_backward : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold);
+};
+
+} // namespace meta
+} // namespace at
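The Operator.h structs such as smm expose the raw dispatcher entry points; the public at::smm wrapper simply forwards to smm::call. A sketch showing that both routes are equivalent (assumes a libtorch build with sparse support; smm multiplies a sparse matrix by a dense one and returns a sparse result):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor dense = at::eye(3);
      at::Tensor sparse = dense.to_sparse();
      at::Tensor out1 = at::smm(sparse, dense);              // public wrapper
      at::Tensor out2 = at::_ops::smm::call(sparse, dense);  // generated entry point
      std::cout << out1.is_sparse() << " " << out2.is_sparse() << "\n";
      return 0;
    }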
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8fa322c8682ec33497f80df70b99b45ee112ac1d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor special_log1p(const at::Tensor & self);
+TORCH_API at::Tensor & special_log1p_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_log1p_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbf78eb2a7a98599c38a3c7802c095d89d2d610f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API split_copy_Tensor {
+  using schema = ::std::vector<at::Tensor> (const at::Tensor &, c10::SymInt, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::split_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(const at::Tensor & self, c10::SymInt split_size, int64_t dim);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim);
+};
+
+struct TORCH_API split_copy_Tensor_out {
+  using schema = void (const at::Tensor &, c10::SymInt, int64_t, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::split_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()")
+  static void call(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out);
+};
+
+}} // namespace at::_ops
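split_copy is the copying twin of split: it returns independent tensors instead of views, so mutating a piece leaves the input alone. A sketch (assumes a linked libtorch build):

    #include <ATen/ATen.h>
    #include <iostream>
    #include <vector>

    int main() {
      at::Tensor t = at::arange(6);
      std::vector<at::Tensor> parts = at::split_copy(t, /*split_size=*/2, /*dim=*/0);
      parts[0].zero_();  // copies, so t is unchanged (plain split would alias)
      std::cout << t << parts[0] << "\n";
      return 0;
    }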
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbe6ebb4c0d9ba16df2939abb84647f282216d10
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/triu_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API triu_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::triu_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t diagonal);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal);
+};
+
+struct TORCH_API triu_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::triu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+};
+
+struct TORCH_API triu {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::triu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "triu(Tensor self, int diagonal=0) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t diagonal);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unflatten_dense_tensors.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unflatten_dense_tensors.h
new file mode 100644
index 0000000000000000000000000000000000000000..2effb8be1dea6ead7f09c750ddf653037d41cb92
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unflatten_dense_tensors.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/unflatten_dense_tensors_ops.h>
+
+namespace at {
+
+
+// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
+inline ::std::vector<at::Tensor> unflatten_dense_tensors(const at::Tensor & flat, at::TensorList tensors) {
+    return at::_ops::unflatten_dense_tensors::call(flat, tensors);
+}
+
+}
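triu zeroes everything below the chosen diagonal; diagonal=1 drops the main diagonal as well. A sketch of the out-of-place and in-place forms whose schemas appear above (assumes a linked libtorch build):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor m = at::ones({3, 3});
      at::Tensor u = at::triu(m, /*diagonal=*/1);  // strictly upper part, m untouched
      m.triu_();                                   // in-place, keeps the main diagonal
      std::cout << u << m << "\n";
      return 0;
    }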
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unique_consecutive_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unique_consecutive_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d573e12a5973e5eb7c812194d6d4dd7e47e13b36
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unique_consecutive_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive(const at::Tensor & self, bool return_inverse=false, bool return_counts=false, c10::optional<int64_t> dim=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..30472faa20f97398813785daed2026ce40e602bb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor upsample_nearest3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+TORCH_API at::Tensor upsample_nearest3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
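unique_consecutive deduplicates only adjacent runs, so it is cheaper than unique but assumes pre-grouped input. A closing sketch of the entry point declared above (assumes a linked libtorch build):

    #include <ATen/ATen.h>
    #include <iostream>
    #include <tuple>

    int main() {
      // 0 0 0 1 1 -> values {0, 1}, counts {3, 2}; only adjacent runs merge.
      at::Tensor t = at::cat({at::zeros({3}), at::ones({2})});
      at::Tensor values, inverse, counts;
      std::tie(values, inverse, counts) =
          at::unique_consecutive(t, /*return_inverse=*/true, /*return_counts=*/true);
      std::cout << values << counts << "\n";
      return 0;
    }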